python_code (string, 0–1.02M chars) | repo_name (string, 9–48 chars) | file_path (string, 5–114 chars) |
---|---|---|
import torch
import torch.nn.functional as F
from .conv_utils import conv_backward, conv_args_and_kwargs, conv_picker
from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads
from .expanded_weights_utils import forward_helper
@implements_per_sample_grads(F.conv1d)
@implements_per_sample_grads(F.conv2d)
@implements_per_sample_grads(F.conv3d)
class ConvPerSampleGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, kwarg_names, conv_fn, *expanded_args_and_kwargs):
if any([isinstance(i, str) for i in expanded_args_and_kwargs]):
raise RuntimeError("Expanded Weights does not support convolution padding as a string. "
"Please file an issue to prioritize support")
expanded_args, expanded_kwargs = conv_args_and_kwargs(kwarg_names, expanded_args_and_kwargs)
output = forward_helper(conv_fn, expanded_args, expanded_kwargs)
input, weight = expanded_args
batched_dim_size = conv_picker(conv_fn, 3, 4, 5)
if input.dim() != batched_dim_size:
raise RuntimeError(f"Expanded Weights only support convolution with batched input, got {conv_fn} with an"
f"unbatched input of dim {input.dim()}, expected input of dim {batched_dim_size}")
ctx.conv_fn = conv_fn
ctx.batch_size = input.shape[0]
ctx.input_required_grad = input.requires_grad
ctx.stride, ctx.padding = expanded_kwargs['stride'], expanded_kwargs['padding']
ctx.dilation, ctx.groups = expanded_kwargs['dilation'], expanded_kwargs['groups']
if isinstance(weight, ExpandedWeight):
ctx.input = input
ctx.weight = weight
ctx.bias = expanded_kwargs['bias']
return output
@staticmethod
def backward(ctx, grad_output):
return conv_backward(ctx.conv_fn, ctx, grad_output)
| pytorch-master | torch/nn/utils/_expanded_weights/conv_expanded_weights.py |
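A minimal, hedged sketch of exercising the ConvPerSampleGrad path above: wrapping a conv weight in an ExpandedWeight routes F.conv2d through __torch_function__ into this autograd.Function, and backward() fills the original weight's grad_sample field with one gradient per sample. The shapes and the direct construction of ExpandedWeight here are illustrative assumptions, not taken from the file above.
import torch
import torch.nn.functional as F
from torch.nn.utils._expanded_weights import ExpandedWeight

batch_size = 8
weight = torch.randn(4, 3, 3, 3, requires_grad=True)                 # (out_ch, in_ch, kH, kW)
expanded = ExpandedWeight(weight, batch_size, loss_reduction="sum")  # wrap the conv weight

x = torch.randn(batch_size, 3, 16, 16)                               # batched input, dim == 4
# pass every conv kwarg explicitly so kwarg_names covers them all
out = F.conv2d(x, expanded, bias=None, stride=1, padding=1, dilation=1, groups=1)
out.sum().backward()                                                  # triggers conv_backward above
print(weight.grad_sample.shape)                                       # torch.Size([8, 4, 3, 3, 3])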
from typing import Optional
import torch
from .expanded_weights_impl import ExpandedWeight
def standard_kwargs(kwarg_names, expanded_args):
r'''Most `__torch_function__`s standardize the kwargs that they give, so this separates the flat
tuple of values they pass back into positional args and a kwargs dict. The functions that don't
use this helper, and handle their own argument unpacking, are linear and convND.
'''
kwarg_values = expanded_args[len(expanded_args) - len(kwarg_names):]
expanded_args_without_kwargs = expanded_args[:len(expanded_args) - len(kwarg_names)]
expanded_kwargs = {name: value for (name, value) in zip(kwarg_names, kwarg_values)}
return expanded_args_without_kwargs, expanded_kwargs
def forward_helper(func, expanded_args, expanded_kwargs):
r'''Forward helper computes the forward pass for a function that has expanded weight(s)
passed to it. It will run the forward pass where all ExpandedWeights are their original
weight. It runs checks on the given arguments before calling the function.
.. note:: First argument in :attr:`expanded_args` must be the input with the batch
dimension as the first element of the shape
.. note:: :attr:`func` must return a Tensor or tuple of Tensors
Args:
func: The function to be called
expanded_args: Positional arguments to be passed to :attr:`func`. May contain
ExpandedWeights, which are unpacked to their original weights before the call
expanded_kwargs: Keyword arguments to be passed to :attr:`func`. May also
contain ExpandedWeights
'''
unexpanded_args, unexpanded_kwargs = _check_and_unexpand_args(func, expanded_args, expanded_kwargs)
return func(*unexpanded_args, **unexpanded_kwargs)
def _check_and_unexpand_args(func, expanded_args, expanded_kwargs):
# input must be the first argument passed
input = expanded_args[0]
if isinstance(input, ExpandedWeight):
raise RuntimeError("Expanded Weights do not support inputs that are also ExpandedWeights. "
f"Input must be a Tensor, got {type(input).__name__} in function {func.__name__}")
if not isinstance(input, torch.Tensor):
raise RuntimeError("Expanded Weights requires a Tensor as the first input to get the batch dimension, "
f"got {type(input).__name__} in function {func.__name__}")
if len(input.shape) == 0:
raise RuntimeError(f"Expanded Weights requires a batch dimension but got an input of size 0 in function {func.__name__}")
if input.shape[0] == 0:
raise RuntimeError("0 is not a valid batch size for Expanded Weights but got input tensor of "
f"{input} in function {func.__name__}")
batch_size = input.shape[0]
for arg in expanded_args + tuple(expanded_kwargs.values()):
if isinstance(arg, ExpandedWeight) and arg.batch_size != batch_size:
raise RuntimeError("Expected ExpandedWeights to have batch size matching input but got "
f"input batch size of {batch_size} with ExpandedWeight of batch size {arg.batch_size}")
loss_reduction: Optional[str] = None
for arg in expanded_args + tuple(expanded_kwargs.values()):
if isinstance(arg, ExpandedWeight):
if loss_reduction is None:
loss_reduction = arg.loss_reduction
elif loss_reduction != arg.loss_reduction:
raise RuntimeError("Expected ExpandedWeights to all have the same loss_reduction argument but got one"
f"with {loss_reduction} and one with {arg.loss_reduction}")
unexpanded_args = tuple(arg.orig_weight if isinstance(arg, ExpandedWeight) else arg for arg in expanded_args)
unexpanded_kwargs = {name: arg.orig_weight if isinstance(arg, ExpandedWeight) else arg
for (name, arg) in expanded_kwargs.items()}
return unexpanded_args, unexpanded_kwargs
def maybe_scale_by_batch_size(grad_sample, expanded_weight):
if expanded_weight.loss_reduction == "mean":
return grad_sample * expanded_weight.batch_size
else:
return grad_sample
def set_grad_sample_if_exists(maybe_expanded_weight, per_sample_grad_fn):
unpacked = unpack_expanded_weight_or_tensor(maybe_expanded_weight)
if isinstance(maybe_expanded_weight, ExpandedWeight):
grad_sample_contribution = maybe_scale_by_batch_size(per_sample_grad_fn(unpacked), maybe_expanded_weight)
if hasattr(unpacked, "grad_sample") and unpacked.grad_sample is not None:
unpacked.grad_sample = unpacked.grad_sample + grad_sample_contribution
else:
unpacked.grad_sample = grad_sample_contribution
def unpack_expanded_weight_or_tensor(maybe_expanded_weight, func=lambda x: x):
if isinstance(maybe_expanded_weight, ExpandedWeight):
orig_weight = maybe_expanded_weight.orig_weight
return func(orig_weight)
elif isinstance(maybe_expanded_weight, torch.Tensor) and not maybe_expanded_weight.requires_grad:
return func(maybe_expanded_weight)
elif isinstance(maybe_expanded_weight, torch.Tensor):
raise RuntimeError("ExpandedWeights currently does not support a mixture of ExpandedWeight parameters "
"and normal Parameters. Please file and issue with pytorch/pytorch")
def sum_over_all_but_batch_and_last_n(
tensor: torch.Tensor, n_dims: int
) -> torch.Tensor:
r"""
Calculates the sum over all dimensions, except the first
(batch dimension), and excluding the last n_dims.
This function will ignore the first dimension and it will
not aggregate over the last n_dims dimensions.
Args:
tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``.
n_dims: Number of dimensions to keep.
Example:
>>> tensor = torch.ones(1, 2, 3, 4, 5)
>>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape
torch.Size([1, 4, 5])
Returns:
A tensor of shape ``(B, ..., X[n_dims-1])``
"""
if tensor.dim() == n_dims + 1:
return tensor
else:
dims = list(range(1, tensor.dim() - n_dims))
return tensor.sum(dim=dims)
| pytorch-master | torch/nn/utils/_expanded_weights/expanded_weights_utils.py |
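A small, hedged illustration of the standard_kwargs helper above: it splits the flat value tuple that __torch_function__ receives back into positional args and a kwargs dict, purely by counting the recorded kwarg names. The placeholder values are illustrative assumptions.
from torch.nn.utils._expanded_weights.expanded_weights_utils import standard_kwargs

kwarg_names = ("bias", "stride", "padding")
flat_values = ("input_tensor", "weight_tensor", "bias_tensor", 2, 1)  # positional args, then kwarg values

args, kwargs = standard_kwargs(kwarg_names, flat_values)
print(args)    # ('input_tensor', 'weight_tensor')
print(kwargs)  # {'bias': 'bias_tensor', 'stride': 2, 'padding': 1}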
from .conv_expanded_weights import ConvPerSampleGrad
from .embedding_expanded_weights import EmbeddingPerSampleGrad
from .group_norm_expanded_weights import GroupNormPerSampleGrad
from .instance_norm_expanded_weights import InstanceNormPerSampleGrad
from .layer_norm_expanded_weights import LayerNormPerSampleGrad
from .linear_expanded_weights import LinearPerSampleGrad
from .expanded_weights_impl import ExpandedWeight
__all__ = ['ExpandedWeight']
| pytorch-master | torch/nn/utils/_expanded_weights/__init__.py |
import torch
import torch.nn.functional as F
from .expanded_weights_impl import implements_per_sample_grads
from .expanded_weights_utils import standard_kwargs, forward_helper, set_grad_sample_if_exists
from typing import List, Optional
@implements_per_sample_grads(F.embedding)
class EmbeddingPerSampleGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs):
expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs)
if len(expanded_args[0].shape) == 1:
raise RuntimeError(f"Expanded Weights needs an input with a batch size, got a 1D tensor, {expanded_args[0]}")
output = forward_helper(F.embedding, expanded_args, expanded_kwargs)
ctx.input, ctx.weight = expanded_args
ctx.padding_idx, ctx.scale_grad_by_freq = expanded_kwargs['padding_idx'], expanded_kwargs['scale_grad_by_freq']
ctx.sparse = expanded_kwargs['sparse']
return output
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.input, ctx.weight
padding_idx, scale_grad_by_freq, sparse = ctx.padding_idx, ctx.scale_grad_by_freq, ctx.sparse
def weight_per_sample_grad(weight):
batch_size = input.shape[0]
embedding_dim = weight.shape[1]
index = (
input.unsqueeze(-1)
.expand(*input.shape, embedding_dim)
.reshape(batch_size, -1, embedding_dim)
)
grad_sample = torch.zeros(
batch_size, *weight.shape, device=weight.device, dtype=grad_output.dtype
)
return grad_sample.scatter_add_(1, index, grad_output.reshape(batch_size, -1, embedding_dim))
results: List[Optional[torch.Tensor]] = []
results.append(None) # for kwarg names
results.append(None) # for op reference
if input.requires_grad:
bw_fn = torch.ops.aten.embedding_backward
results.append(bw_fn(grad_output, input, weight.shape[0], padding_idx, scale_grad_by_freq, sparse))
else:
results.append(None)
# weight doesn't compute batched gradients; no other arguments are differentiable (2 not saved from forward)
results = results + [None] * 6
# set grad_sample field for weight with per sample gradients
set_grad_sample_if_exists(weight, weight_per_sample_grad)
return tuple(results)
| pytorch-master | torch/nn/utils/_expanded_weights/embedding_expanded_weights.py |
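A hedged numeric sketch of the scatter_add trick used in weight_per_sample_grad above: for every sample, each output-gradient row is added into the row of a per-sample zero matrix selected by that sample's token index. All sizes below are illustrative assumptions.
import torch

batch_size, seq_len, num_embeddings, embedding_dim = 2, 3, 5, 4
tokens = torch.randint(0, num_embeddings, (batch_size, seq_len))
grad_output = torch.randn(batch_size, seq_len, embedding_dim)

# same index expansion as in the backward above
index = tokens.unsqueeze(-1).expand(*tokens.shape, embedding_dim).reshape(batch_size, -1, embedding_dim)
grad_sample = torch.zeros(batch_size, num_embeddings, embedding_dim)
grad_sample.scatter_add_(1, index, grad_output.reshape(batch_size, -1, embedding_dim))
print(grad_sample.shape)  # torch.Size([2, 5, 4]): one embedding-matrix gradient per sample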
import torch
import torch.nn.functional as F
import numpy as np
from typing import List, Optional
from .expanded_weights_utils import \
set_grad_sample_if_exists, unpack_expanded_weight_or_tensor
THRESHOLD = 32
def conv_picker(func, conv1dOpt, conv2dOpt, conv3dOpt):
if func == F.conv1d:
return conv1dOpt
if func == F.conv2d:
return conv2dOpt
else:
assert func == F.conv3d
return conv3dOpt
def conv_args_and_kwargs(kwarg_names, expanded_args_and_kwargs):
args = expanded_args_and_kwargs[:len(expanded_args_and_kwargs) - len(kwarg_names)]
kwargs = expanded_args_and_kwargs[len(expanded_args_and_kwargs) - len(kwarg_names):]
kwargs = {name: arg for (name, arg) in zip(kwarg_names, kwargs)}
return conv_normalizer(*args, **kwargs)
def conv_normalizer(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
return (input, weight), {'bias': bias, 'stride': stride, 'padding': padding, 'dilation': dilation, 'groups': groups}
def conv_backward(func, ctx, grad_output):
def weight_grad_sample(weight):
if (batch_size < THRESHOLD and groups == 1):
return conv_group_weight_grad_sample(ctx.input, grad_output, weight_shape, stride, padding, dilation, batch_size, func)
else:
return conv_unfold_weight_grad_sample(ctx.input, grad_output, weight_shape, kernel_size,
stride, padding, dilation, groups, func)
def expand(param):
if isinstance(param, int):
return conv_picker(func, (param,), (param, param), (param, param, param))
else:
return param
weight_shape = ctx.weight.shape
stride, padding, dilation, groups = expand(ctx.stride), expand(ctx.padding), expand(ctx.dilation), ctx.groups
kernel_size = []
for i in range(2, conv_picker(func, 3, 4, 5)):
kernel_size.append(weight_shape[i])
batch_size = ctx.batch_size
results: List[Optional[torch.Tensor]] = []
results.append(None) # for kwarg names
results.append(None) # for op reference
if ctx.input_required_grad:
output_padding = []
input_dims = conv_picker(func, 1, 2, 3)
for i in range(input_dims):
input_dim = ctx.input.shape[2 + i]
output_padding.append((2 * padding[i] + input_dim - (kernel_size[i] * dilation[i] - dilation[i] + 1)) % stride[i])
weight_ = unpack_expanded_weight_or_tensor(ctx.weight)
transpose_func = conv_picker(func, F.conv_transpose1d, F.conv_transpose2d, F.conv_transpose3d)
results.append(transpose_func(grad_output, weight_, None, stride, padding, tuple(output_padding), groups, dilation))
else:
results.append(None)
# weight and bias don't compute batched gradients; no other arguments are differentiable
results = results + [None] * 6
# set grad_sample field for weight and bias with per sample gradients
set_grad_sample_if_exists(ctx.weight, weight_grad_sample)
set_grad_sample_if_exists(ctx.bias, lambda _: grad_output.reshape(*grad_output.shape[:2], -1).sum(dim=2))
return tuple(results)
def conv_unfold_weight_grad_sample(input, grad_output, weight_shape, kernel_size, stride, padding, dilation, groups, func):
n = input.shape[0]
in_channels = input.shape[1]
unfold_func = conv_picker(
func,
lambda: F.unfold(input.unsqueeze(-2),
kernel_size=(1, kernel_size[0]),
dilation=(1, dilation[0]),
padding=(0, padding[0]),
stride=(1, stride[0])),
lambda: F.unfold(input, kernel_size, dilation=dilation, padding=padding, stride=stride),
lambda: unfold3d(input, kernel_size, padding, stride, dilation)
)
input = unfold_func()
grad_output = grad_output.reshape(n, -1, input.shape[-1])
# n=batch_sz; o=num_out_channels; p=(num_in_channels/groups)*kernel_sz
weight_grad_sample = torch.einsum("noq,npq->nop", grad_output, input)
# rearrange the above tensor and extract diagonals.
weight_grad_sample = weight_grad_sample.view(
n,
groups,
-1,
groups,
int(in_channels / groups),
np.prod(kernel_size),
)
weight_grad_sample = torch.einsum("ngrg...->ngr...", weight_grad_sample).contiguous()
shape = [n] + list(weight_shape)
weight_grad_sample = weight_grad_sample.view(shape)
return weight_grad_sample
def conv_group_weight_grad_sample(input, grad_output, weight_shape, stride, padding, dilation, batch_size, func):
I = input.shape[1]
O = grad_output.shape[1]
input_ = input.transpose(0, 1)
grad_output_ = grad_output.view(grad_output.shape[0] * grad_output.shape[1], 1, *grad_output.shape[2:])
weight_grad_sample = func(input_, grad_output_, None, stride=dilation, padding=padding, dilation=stride, groups=batch_size)
input_dims = conv_picker(func, 3, 4, 5)
for i in range(2, input_dims):
weight_grad_sample = weight_grad_sample.narrow(i, 0, weight_shape[i])
weight_grad_sample = weight_grad_sample.view(I, batch_size, O, *weight_grad_sample.shape[2:])
weight_grad_sample = weight_grad_sample.movedim(0, 2)
return weight_grad_sample
def unfold3d(
tensor,
kernel_size,
padding,
stride,
dilation,
):
r"""
Extracts sliding local blocks from a batched input tensor.
:class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors).
This method implements the same action for 5D inputs
Args:
tensor: An input tensor of shape ``(B, C, D, H, W)``.
kernel_size: the size of the sliding blocks
padding: implicit zero padding to be added on both sides of input
stride: the stride of the sliding blocks in the input spatial dimensions
dilation: the spacing between the kernel points.
Returns:
A tensor of shape ``(B, C * np.product(kernel_size), L)``, where ``L`` is the total number of blocks (the product of the output spatial dimensions).
See :class:`torch.nn.Unfold` for more details
Example:
>>> B, C, D, H, W = 3, 4, 5, 6, 7
>>> tensor = torch.arange(1, B*C*D*H*W + 1.).view(B, C, D, H, W)
>>> # xdoctest: +SKIP
>>> unfold3d(tensor, kernel_size=(2, 2, 2), padding=(0, 0, 0), stride=(1, 1, 1), dilation=(1, 1, 1)).shape
torch.Size([3, 32, 120])
"""
if len(tensor.shape) != 5:
raise ValueError(
f"Input tensor must be of the shape [B, C, D, H, W]. Got{tensor.shape}"
)
if dilation != (1, 1, 1):
raise NotImplementedError(f"dilation={dilation} not supported.")
batch_size, channels, _, _, _ = tensor.shape
# Input shape: (B, C, D, H, W)
tensor = F.pad(
tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0])
)
# Output shape: (B, C, D+2*padding[2], H+2*padding[1], W+2*padding[0])
tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0])
tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1])
tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2])
# Output shape: (B, C, D_out, H_out, W_out, kernel_size[0], kernel_size[1], kernel_size[2])
# For D_out, H_out, W_out definitions see :class:`torch.nn.Unfold`
tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7)
# Output shape: (B, D_out, H_out, W_out, C, kernel_size[0], kernel_size[1], kernel_size[2])
tensor = tensor.reshape(batch_size, -1, channels * np.prod(kernel_size)).transpose(
1, 2
)
# Output shape: (B, D_out * H_out * W_out, C * kernel_size[0] * kernel_size[1] * kernel_size[2]
return tensor
| pytorch-master | torch/nn/utils/_expanded_weights/conv_utils.py |
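A hedged numeric check of the output_padding formula used in conv_backward above: for each spatial dimension it measures how much of the input size is lost to striding, so the transposed convolution reproduces the exact input shape when computing the input gradient. The concrete numbers are arbitrary assumptions.
input_dim, kernel, stride, padding, dilation = 10, 3, 2, 1, 1
effective_kernel = kernel * dilation - dilation + 1                     # 3
out_dim = (input_dim + 2 * padding - effective_kernel) // stride + 1    # 5
output_padding = (2 * padding + input_dim - effective_kernel) % stride  # 1
# conv_transpose output size with this output_padding recovers the input size
recovered = (out_dim - 1) * stride - 2 * padding + effective_kernel + output_padding
assert recovered == input_dim  # 10 == 10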
from functools import partial
import torch
import torch.nn.functional as F
from .expanded_weights_impl import implements_per_sample_grads
from .expanded_weights_utils import \
forward_helper, set_grad_sample_if_exists, standard_kwargs, unpack_expanded_weight_or_tensor
from typing import List, Optional
@implements_per_sample_grads(F.instance_norm)
class InstanceNormPerSampleGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs):
instance_norm = partial(torch.instance_norm, cudnn_enabled=True)
expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs)
output = forward_helper(instance_norm, expanded_args, expanded_kwargs)
ctx.input = expanded_args[0]
ctx.running_mean, ctx.running_var = expanded_kwargs['running_mean'], expanded_kwargs['running_var']
ctx.weight, ctx.bias, ctx.eps = expanded_kwargs['weight'], expanded_kwargs['bias'], expanded_kwargs['eps']
return output
@staticmethod
def backward(ctx, grad_output):
input, running_mean, running_var = ctx.input, ctx.running_mean, ctx.running_var
weight, bias, eps = ctx.weight, ctx.bias, ctx.eps
results: List[Optional[torch.Tensor]] = []
results.append(None) # for kwarg names
results.append(None) # for op reference
if input.requires_grad:
b = input.shape[0]
c = input.shape[1]
new_shape = (1, b * c, *input.shape[2:])
weight_ = unpack_expanded_weight_or_tensor(weight, lambda orig_weight: orig_weight.repeat(b))
running_mean_ = running_mean.repeat(b) if running_mean is not None else None
running_var_ = running_var.repeat(b) if running_var is not None else None
input_reshaped = input.contiguous().view(new_shape)
grad_output_reshaped = grad_output.contiguous().view(new_shape)
mean = torch.mean(input_reshaped, (0,) + tuple(range(2, input.dim())), False)
var = torch.var(input_reshaped, (0,) + tuple(range(2, input.dim())), keepdim=False, unbiased=False)
rstd = 1 / torch.sqrt(var + eps)
# must use native batch norm since it supports all inputs. The forward may have used cudnn or miopen,
# but it didn't save that metadata, so we don't know which backend was used during the backward
res = torch.ops.aten.native_batch_norm_backward(
grad_output_reshaped, input_reshaped, weight_, running_mean_, running_var_,
mean, rstd, True, eps, (True, False, False))
results.append(res[0].reshape(input.shape))
else:
results.append(None)
# weight and bias don't compute batched gradients; no other arguments are differentiable (2 are not saved from the forward)
results = results + [None] * 7
# set grad_sample field for weight and bias with per sample gradients
set_grad_sample_if_exists(weight,
lambda _: torch.einsum("ni...->ni", F.instance_norm(input, eps=eps) * grad_output))
set_grad_sample_if_exists(bias, lambda _: torch.einsum("ni...->ni", grad_output))
return tuple(results)
| pytorch-master | torch/nn/utils/_expanded_weights/instance_norm_expanded_weights.py |
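A hedged shape sketch of the per-sample affine gradients computed above for instance norm: multiply the normalized activations by grad_output and sum every dimension except batch and channel, which is what the "ni...->ni" einsum does. Shapes are illustrative assumptions.
import torch
import torch.nn.functional as F

n, c, h, w = 2, 3, 4, 4
x = torch.randn(n, c, h, w)
grad_output = torch.randn(n, c, h, w)

weight_grad_sample = torch.einsum("ni...->ni", F.instance_norm(x) * grad_output)
bias_grad_sample = torch.einsum("ni...->ni", grad_output)
print(weight_grad_sample.shape, bias_grad_sample.shape)  # torch.Size([2, 3]) torch.Size([2, 3])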
import torch
import torch.nn.functional as F
from .expanded_weights_impl import implements_per_sample_grads
from .expanded_weights_utils import \
forward_helper, set_grad_sample_if_exists, unpack_expanded_weight_or_tensor
from typing import List, Optional
@implements_per_sample_grads(F.linear)
class LinearPerSampleGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, _, __, *expanded_args_and_kwargs):
if len(expanded_args_and_kwargs[0].shape) <= 1:
raise RuntimeError("Input does not have a batch dimension. Expanded Weights expected input "
f"of at least rank 2, got of rank {len(expanded_args_and_kwargs[0].shape)}")
expanded_kwargs = {'bias': expanded_args_and_kwargs[2] if len(expanded_args_and_kwargs) == 3 else None}
expanded_args = expanded_args_and_kwargs[:2]
output = forward_helper(F.linear, expanded_args, expanded_kwargs)
ctx.args = expanded_args
ctx.kwargs = expanded_kwargs
return output
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.args
bias = ctx.kwargs['bias']
results: List[Optional[torch.Tensor]] = []
results.append(None) # for kwarg_names
results.append(None) # for op reference
if input.requires_grad:
results.append(grad_output.matmul(unpack_expanded_weight_or_tensor(weight)))
else:
results.append(None)
results.extend([None] * 2) # weight and bias don't compute batched gradients
# weight and bias get their grad_sample fields set directly if they exist
set_grad_sample_if_exists(weight, lambda _: torch.einsum("n...i,n...j->nij", grad_output, input))
set_grad_sample_if_exists(bias, lambda _: torch.einsum("n...k->nk", grad_output))
return tuple(results)
| pytorch-master | torch/nn/utils/_expanded_weights/linear_expanded_weights.py |
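A hedged sketch of the per-sample linear weight gradient computed above: torch.einsum("n...i,n...j->nij", grad_output, input) is one outer product per sample, summed over any extra sequence dimensions. Shapes are illustrative assumptions.
import torch

batch, in_features, out_features = 4, 3, 2
inp = torch.randn(batch, in_features)
grad_output = torch.randn(batch, out_features)

per_sample = torch.einsum("n...i,n...j->nij", grad_output, inp)  # (4, 2, 3)
manual = torch.stack([torch.outer(grad_output[n], inp[n]) for n in range(batch)])
assert torch.allclose(per_sample, manual)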
from torch._C import _TensorBase
import torch
import functools
from typing import Callable, Dict, cast
HANDLED_FUNCTIONS: Dict[Callable, torch.autograd.Function] = {}
def implements_per_sample_grads(torch_function):
@functools.wraps(torch_function)
def decorator(autograd_func):
HANDLED_FUNCTIONS[torch_function] = autograd_func
return autograd_func
return decorator
# ExpandedWeight represents a weight (parameter) Tensor that has an expanded
# batch dimension. Operations on the ExpandedWeight Tensor act exactly like
# those without an expanded batch dimension but a call to .backward() populates
the original (unexpanded) tensor's grad_sample field with per-sample gradients
#
# ExpandedWeight has a fallback that always fails since we cannot know what the batch
# dimension of the input tensor is and therefore cannot know if this is a valid call
#
# This is a __torch_function__ object but it could have also been a Tensor Extension
# with a dispatch key.
#
# Needs to be a tensor subclass to allow reparametrization
class ExpandedWeight(torch.Tensor):
def __init__(self, orig_weight, batch_size, loss_reduction):
self.batch_size = batch_size
self.orig_weight = orig_weight
self.loss_reduction = loss_reduction
handled_functions = HANDLED_FUNCTIONS
def __new__(cls, orig_weight, batch_size, loss_reduction):
if not isinstance(orig_weight, torch.Tensor):
raise RuntimeError(f"Can only make Expanded Weights of Tensors, got {type(orig_weight).__name__}")
if not orig_weight.requires_grad:
raise RuntimeError("Can only build ExpandedWeights objects of tensors that require_grad")
ret = torch.Tensor._make_subclass(cast(_TensorBase, cls), orig_weight, True)
return ret
@classmethod
def __torch_function__(cls, func, _, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func in cls.handled_functions:
return cls.handled_functions[func].apply(tuple(kwargs.keys()), func, *(args + tuple(kwargs.values())))
# We cannot use a fallback here because we do not know the batch dimension for any regular tensor inputs,
# i.e. torch.add(torch.Tensor, ExpandedWeight)
raise RuntimeError(f"Expanded Weights encountered but cannot handle function {func.__name__}")
@property
def dtype(self):
return self.orig_weight.dtype
@property
def shape(self):
return self.orig_weight.shape
| pytorch-master | torch/nn/utils/_expanded_weights/expanded_weights_impl.py |
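A hedged usage sketch of the ExpandedWeight class above: it wraps a weight that requires grad, remembers the batch size and loss reduction, proxies shape and dtype to the original tensor, and raises on any operation that is not registered in HANDLED_FUNCTIONS. Sizes are illustrative assumptions.
import torch
from torch.nn.utils._expanded_weights import ExpandedWeight

w = torch.randn(5, 3, requires_grad=True)
ew = ExpandedWeight(w, batch_size=8, loss_reduction="mean")
print(ew.shape, ew.dtype, ew.batch_size)  # torch.Size([5, 3]) torch.float32 8

try:
    torch.add(torch.randn(5, 3), ew)      # torch.add is not a handled function
except RuntimeError as err:
    print(err)                            # "... cannot handle function add"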
from .modules import * # noqa: F403
| pytorch-master | torch/nn/quantizable/__init__.py |
from .activation import MultiheadAttention
from .rnn import LSTM
from .rnn import LSTMCell
__all__ = [
'LSTM',
'LSTMCell',
'MultiheadAttention',
]
| pytorch-master | torch/nn/quantizable/modules/__init__.py |
import torch
import torch.jit # this is needed to avoid a circular import
from torch import nn
import torch.nn.functional as nnF
from torch import Tensor
from typing import Optional, Tuple
import warnings
class MultiheadAttention(nn.MultiheadAttention):
_FLOAT_MODULE = nn.MultiheadAttention
r"""Quantizable implementation of the MultiheadAttention.
Note::
Please, refer to :class:`~torch.nn.MultiheadAttention` for more
information
Allows the model to jointly attend to information from different
representation subspaces.
See reference: Attention Is All You Need
The original MHA module is not quantizable.
This reimplements it by explicitly instantiating the linear layers.
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
\text{where } head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
>>> import torch.nn.quantizable as nnqa
>>> multihead_attn = nnqa.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
Note::
Please, follow the quantization flow to convert the quantizable MHA.
"""
__constants__ = ['batch_first']
def __init__(self, embed_dim: int, num_heads: int,
dropout: float = 0., bias: bool = True,
add_bias_kv: bool = False, add_zero_attn: bool = False,
kdim: int = None, vdim: int = None, batch_first: bool = False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(MultiheadAttention, self).__init__(embed_dim, num_heads, dropout,
bias, add_bias_kv,
add_zero_attn, kdim, vdim, batch_first,
**factory_kwargs)
self.linear_Q = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs)
self.linear_K = nn.Linear(self.kdim, self.embed_dim, bias=bias, **factory_kwargs)
self.linear_V = nn.Linear(self.vdim, self.embed_dim, bias=bias, **factory_kwargs)
# for the type: ignore, see https://github.com/pytorch/pytorch/issues/58969
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs) # type: ignore[assignment]
# Functionals
self.q_scaling_product = torch.nn.quantized.FloatFunctional()
# note: importing torch.nn.quantized at top creates a circular import
# Quant/Dequant
self.quant_attn_output = torch.ao.quantization.QuantStub()
self.quant_attn_output_weights = torch.ao.quantization.QuantStub()
self.dequant_q = torch.ao.quantization.DeQuantStub()
self.dequant_k = torch.ao.quantization.DeQuantStub()
self.dequant_v = torch.ao.quantization.DeQuantStub()
def _get_name(self):
return 'QuantizableMultiheadAttention'
@classmethod
def from_float(cls, other):
assert type(other) == cls._FLOAT_MODULE
assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'"
# Setting the dropout to 0.0!
observed = cls(other.embed_dim, other.num_heads, other.dropout,
(other.in_proj_bias is not None),
(other.bias_k is not None),
other.add_zero_attn, other.kdim, other.vdim)
observed.bias_k = other.bias_k
observed.bias_v = other.bias_v
observed.qconfig = other.qconfig
# Set the linear weights
# for the type: ignores, see https://github.com/pytorch/pytorch/issues/58969
observed.out_proj.weight = other.out_proj.weight # type: ignore[has-type]
observed.out_proj.bias = other.out_proj.bias # type: ignore[has-type]
if other._qkv_same_embed_dim:
# Use separate params
bias = other.in_proj_bias
_start = 0
_end = _start + other.embed_dim
weight = other.in_proj_weight[_start:_end, :]
if bias is not None:
bias = torch.nn.Parameter(bias[_start:_end], bias.requires_grad)
observed.linear_Q.weight = torch.nn.Parameter(weight,
weight.requires_grad)
observed.linear_Q.bias = bias
bias = other.in_proj_bias
_start = _end
_end = _start + other.embed_dim
weight = other.in_proj_weight[_start:_end, :]
if bias is not None:
bias = torch.nn.Parameter(bias[_start:_end], bias.requires_grad)
observed.linear_K.weight = torch.nn.Parameter(weight,
weight.requires_grad)
observed.linear_K.bias = bias
bias = other.in_proj_bias
_start = _end
weight = other.in_proj_weight[_start:, :]
if bias is not None:
bias = torch.nn.Parameter(bias[_start:], bias.requires_grad)
observed.linear_V.weight = torch.nn.Parameter(weight,
weight.requires_grad)
observed.linear_V.bias = bias
else:
observed.linear_Q.weight = nn.Parameter(other.q_proj_weight)
observed.linear_K.weight = nn.Parameter(other.k_proj_weight)
observed.linear_V.weight = nn.Parameter(other.v_proj_weight)
if other.in_proj_bias is None:
observed.linear_Q.bias = None # type: ignore[assignment]
observed.linear_K.bias = None # type: ignore[assignment]
observed.linear_V.bias = None # type: ignore[assignment]
else:
observed.linear_Q.bias = nn.Parameter(other.in_proj_bias[0:other.embed_dim])
observed.linear_K.bias = nn.Parameter(other.in_proj_bias[other.embed_dim:(other.embed_dim * 2)])
observed.linear_V.bias = nn.Parameter(other.in_proj_bias[(other.embed_dim * 2):])
observed.eval()
# Explicit prepare
observed = torch.ao.quantization.prepare(observed, inplace=True)
return observed
@torch.jit.unused
def dequantize(self):
r"""Utility to convert the quantized MHA back to float.
The motivation for this is that it is not trivial to convert the weights
from the format that is used in the quantized version back to the
float format.
"""
fp = self._FLOAT_MODULE(self.embed_dim, self.num_heads, self.dropout,
(self.in_proj_bias is not None),
(self.bias_k is not None),
self.add_zero_attn, self.kdim, self.vdim, self.batch_first)
assert fp._qkv_same_embed_dim == self._qkv_same_embed_dim
if self.bias_k is not None:
fp.bias_k = nn.Parameter(self.bias_k.dequantize())
if self.bias_v is not None:
fp.bias_v = nn.Parameter(self.bias_v.dequantize())
# Set the linear weights
# Note: Because the linear layers are quantized, mypy does not know how
# to deal with them -- might need to ignore the typing checks.
# for the type: ignore[has-type], see https://github.com/pytorch/pytorch/issues/58969
w, b = self.out_proj._weight_bias() # type: ignore[operator, has-type]
fp.out_proj.weight = nn.Parameter(w.dequantize())
if b is not None:
fp.out_proj.bias = nn.Parameter(b)
wQ, bQ = self.linear_Q._weight_bias() # type: ignore[operator]
wQ = wQ.dequantize()
wK, bK = self.linear_K._weight_bias() # type: ignore[operator]
wK = wK.dequantize()
wV, bV = self.linear_V._weight_bias() # type: ignore[operator]
wV = wV.dequantize()
if fp._qkv_same_embed_dim:
# Write back into the combined in_proj parameters
_start = 0
_end = _start + fp.embed_dim
fp.in_proj_weight[_start:_end, :] = wQ
if fp.in_proj_bias is not None:
assert all(bQ == 0)
fp.in_proj_bias[_start:_end] = bQ
_start = _end
_end = _start + fp.embed_dim
fp.in_proj_weight[_start:_end, :] = wK
if fp.in_proj_bias is not None:
assert all(bK == 0)
fp.in_proj_bias[_start:_end] = bK
_start = _end
fp.in_proj_weight[_start:, :] = wV
if fp.in_proj_bias is not None:
assert all(bV == 0)
fp.in_proj_bias[_start:] = bV
else:
fp.q_proj_weight = nn.Parameter(wQ)
fp.k_proj_weight = nn.Parameter(wK)
fp.v_proj_weight = nn.Parameter(wV)
if fp.in_proj_bias is None:
self.linear_Q.bias = None
self.linear_K.bias = None
self.linear_V.bias = None
else:
fp.in_proj_bias[0:fp.embed_dim] = bQ
fp.in_proj_bias[fp.embed_dim:(fp.embed_dim * 2)] = bK
fp.in_proj_bias[(fp.embed_dim * 2):] = bV
return fp
@classmethod
def from_observed(cls, other):
# The whole flow is float -> observed -> quantized
# This class does float -> observed only
# See nn.quantized.MultiheadAttention
raise NotImplementedError("It looks like you are trying to prepare an "
"MHA module. Please, see "
"the examples on quantizable MHAs.")
def forward(self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Note::
Please, refer to :func:`~torch.nn.MultiheadAttention.forward` for more
information
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
effect when ``need_weights=True.``. Default: True (i.e. average weights across heads)
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
- attn_output_weights: If ``average_attn_weights=True``, returns attention weights averaged
across heads of shape :math:`(N, L, S)`, where N is the batch size, L is the target sequence length,
S is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(N, num_heads, L, S)`.
"""
return self._forward_impl(query, key, value, key_padding_mask,
need_weights, attn_mask, average_attn_weights)
def _forward_impl(self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True) -> Tuple[Tensor, Optional[Tensor]]:
# This version will not deal with the static key/value pairs.
# Keeping it here for future changes.
#
# TODO: This method has some duplicate lines with the
# `torch.nn.functional.multi_head_attention`. Will need to refactor.
static_k = None
static_v = None
if self.batch_first:
query, key, value = [x.transpose(0, 1) for x in (query, key, value)]
tgt_len, bsz, embed_dim_to_check = query.size()
assert self.embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = self.embed_dim // self.num_heads
assert head_dim * self.num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
q = self.linear_Q(query)
k = self.linear_K(key)
v = self.linear_V(value)
q = self.q_scaling_product.mul_scalar(q, scaling)
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * self.num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if self.bias_k is not None and self.bias_v is not None:
if static_k is None and static_v is None:
# Explicitly assert that bias_k and bias_v are not None
# in a way that TorchScript can understand.
bias_k = self.bias_k
assert bias_k is not None
bias_v = self.bias_v
assert bias_v is not None
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = nnF.pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = nnF.pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert self.bias_k is None
assert self.bias_v is None
q = q.contiguous().view(tgt_len, bsz * self.num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * self.num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * self.num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
src_len += 1
k_zeros = torch.zeros((k.size(0), 1) + k.size()[2:])
if k.is_quantized:
k_zeros = torch.quantize_per_tensor(k_zeros, k.q_scale(), k.q_zero_point(), k.dtype)
k = torch.cat([k, k_zeros], dim=1)
v_zeros = torch.zeros((v.size(0), 1) + k.size()[2:])
if v.is_quantized:
v_zeros = torch.quantize_per_tensor(v_zeros, v.q_scale(), v.q_zero_point(), v.dtype)
v = torch.cat([v, v_zeros], dim=1)
if attn_mask is not None:
attn_mask = nnF.pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = nnF.pad(key_padding_mask, (0, 1))
# Leaving the quantized zone here
q = self.dequant_q(q)
k = self.dequant_k(k)
v = self.dequant_v(v)
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_output_weights = nnF.softmax(
attn_output_weights, dim=-1)
attn_output_weights = nnF.dropout(attn_output_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * self.num_heads, tgt_len, head_dim]
if self.batch_first:
attn_output = attn_output.view(bsz, tgt_len, self.embed_dim)
else:
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim)
# Reentering the quantized zone
attn_output = self.quant_attn_output(attn_output)
# for the type: ignore[has-type], see https://github.com/pytorch/pytorch/issues/58969
attn_output = self.out_proj(attn_output) # type: ignore[has-type]
attn_output_weights = self.quant_attn_output_weights(attn_output_weights)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
if average_attn_weights:
attn_output_weights = attn_output_weights.mean(dim=1)
return attn_output, attn_output_weights
else:
return attn_output, None
| pytorch-master | torch/nn/quantizable/modules/activation.py |
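A hedged sketch of the float -> observed step implemented by from_float above; converting the observed module to a fully quantized one is a separate, later step of the torch.ao.quantization flow. The sizes and the choice of qconfig are illustrative assumptions.
import torch
import torch.nn.quantizable as nnqa

embed_dim, num_heads = 8, 2
float_mha = torch.nn.MultiheadAttention(embed_dim, num_heads).eval()
float_mha.qconfig = torch.ao.quantization.default_qconfig

observed = nnqa.MultiheadAttention.from_float(float_mha)  # observers inserted by prepare()
query = key = value = torch.randn(4, 1, embed_dim)        # (L, N, E), batch_first=False
attn_out, attn_weights = observed(query, key, value)      # calibration pass in float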
import numbers
from typing import Optional, Tuple
import warnings
import torch
from torch import Tensor
"""
We will recreate all the RNN modules as we require the modules to be decomposed
into their building blocks so that they can be observed.
"""
class LSTMCell(torch.nn.Module):
r"""A quantizable long short-term memory (LSTM) cell.
For the description and the argument types, please, refer to :class:`~torch.nn.LSTMCell`
Examples::
>>> import torch.nn.quantizable as nnqa
>>> rnn = nnqa.LSTMCell(10, 20)
>>> input = torch.randn(6, 10)
>>> hx = torch.randn(3, 20)
>>> cx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
... hx, cx = rnn(input[i], (hx, cx))
... output.append(hx)
"""
_FLOAT_MODULE = torch.nn.LSTMCell
def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.input_size = input_dim
self.hidden_size = hidden_dim
self.bias = bias
self.igates = torch.nn.Linear(input_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)
self.hgates = torch.nn.Linear(hidden_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)
self.gates = torch.nn.quantized.FloatFunctional()
self.fgate_cx = torch.nn.quantized.FloatFunctional()
self.igate_cgate = torch.nn.quantized.FloatFunctional()
self.fgate_cx_igate_cgate = torch.nn.quantized.FloatFunctional()
self.ogate_cy = torch.nn.quantized.FloatFunctional()
def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
if hidden is None or hidden[0] is None or hidden[1] is None:
hidden = self.initialize_hidden(x.shape[0], x.is_quantized)
hx, cx = hidden
igates = self.igates(x)
hgates = self.hgates(hx)
gates = self.gates.add(igates, hgates)
input_gate, forget_gate, cell_gate, out_gate = gates.chunk(4, 1)
input_gate = torch.sigmoid(input_gate)
forget_gate = torch.sigmoid(forget_gate)
cell_gate = torch.tanh(cell_gate)
out_gate = torch.sigmoid(out_gate)
fgate_cx = self.fgate_cx.mul(forget_gate, cx)
igate_cgate = self.igate_cgate.mul(input_gate, cell_gate)
fgate_cx_igate_cgate = self.fgate_cx_igate_cgate.add(fgate_cx, igate_cgate)
cy = fgate_cx_igate_cgate
tanh_cy = torch.tanh(cy)
hy = self.ogate_cy.mul(out_gate, tanh_cy)
return hy, cy
def initialize_hidden(self, batch_size: int, is_quantized: bool = False) -> Tuple[Tensor, Tensor]:
h, c = torch.zeros((batch_size, self.hidden_size)), torch.zeros((batch_size, self.hidden_size))
if is_quantized:
h = torch.quantize_per_tensor(h, scale=1.0, zero_point=0, dtype=torch.quint8)
c = torch.quantize_per_tensor(c, scale=1.0, zero_point=0, dtype=torch.quint8)
return h, c
def _get_name(self):
return 'QuantizableLSTMCell'
@classmethod
def from_params(cls, wi, wh, bi=None, bh=None):
"""Uses the weights and biases to create a new LSTM cell.
Args:
wi, wh: Weights for the input and hidden layers
bi, bh: Biases for the input and hidden layers
"""
assert (bi is None) == (bh is None) # Either both None or both have values
input_size = wi.shape[1]
hidden_size = wh.shape[1]
cell = cls(input_dim=input_size, hidden_dim=hidden_size,
bias=(bi is not None))
cell.igates.weight = torch.nn.Parameter(wi)
if bi is not None:
cell.igates.bias = torch.nn.Parameter(bi)
cell.hgates.weight = torch.nn.Parameter(wh)
if bh is not None:
cell.hgates.bias = torch.nn.Parameter(bh)
return cell
@classmethod
def from_float(cls, other):
assert type(other) == cls._FLOAT_MODULE
assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'"
observed = cls.from_params(other.weight_ih, other.weight_hh,
other.bias_ih, other.bias_hh)
observed.qconfig = other.qconfig
observed.igates.qconfig = other.qconfig
observed.hgates.qconfig = other.qconfig
return observed
class _LSTMSingleLayer(torch.nn.Module):
r"""A single one-directional LSTM layer.
The difference between a layer and a cell is that the layer can process a
sequence, while the cell only expects an instantaneous value.
"""
def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.cell = LSTMCell(input_dim, hidden_dim, bias=bias, **factory_kwargs)
def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
result = []
for xx in x:
hidden = self.cell(xx, hidden)
result.append(hidden[0]) # type: ignore[index]
result_tensor = torch.stack(result, 0)
return result_tensor, hidden
@classmethod
def from_params(cls, *args, **kwargs):
cell = LSTMCell.from_params(*args, **kwargs)
layer = cls(cell.input_size, cell.hidden_size, cell.bias)
layer.cell = cell
return layer
class _LSTMLayer(torch.nn.Module):
r"""A single bi-directional LSTM layer."""
def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
batch_first: bool = False, bidirectional: bool = False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.batch_first = batch_first
self.bidirectional = bidirectional
self.layer_fw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)
if self.bidirectional:
self.layer_bw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)
def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
if self.batch_first:
x = x.transpose(0, 1)
if hidden is None:
hx_fw, cx_fw = (None, None)
else:
hx_fw, cx_fw = hidden
hidden_bw: Optional[Tuple[Tensor, Tensor]] = None
if self.bidirectional:
if hx_fw is None:
hx_bw = None
else:
hx_bw = hx_fw[1]
hx_fw = hx_fw[0]
if cx_fw is None:
cx_bw = None
else:
cx_bw = cx_fw[1]
cx_fw = cx_fw[0]
if hx_bw is not None and cx_bw is not None:
hidden_bw = hx_bw, cx_bw
if hx_fw is None and cx_fw is None:
hidden_fw = None
else:
hidden_fw = torch.jit._unwrap_optional(hx_fw), torch.jit._unwrap_optional(cx_fw)
result_fw, hidden_fw = self.layer_fw(x, hidden_fw)
if hasattr(self, 'layer_bw') and self.bidirectional:
x_reversed = x.flip(0)
result_bw, hidden_bw = self.layer_bw(x_reversed, hidden_bw)
result_bw = result_bw.flip(0)
result = torch.cat([result_fw, result_bw], result_fw.dim() - 1)
if hidden_fw is None and hidden_bw is None:
h = None
c = None
elif hidden_fw is None:
(h, c) = torch.jit._unwrap_optional(hidden_bw)
elif hidden_bw is None:
(h, c) = torch.jit._unwrap_optional(hidden_fw)
else:
h = torch.stack([hidden_fw[0], hidden_bw[0]], 0) # type: ignore[list-item]
c = torch.stack([hidden_fw[1], hidden_bw[1]], 0) # type: ignore[list-item]
else:
result = result_fw
h, c = torch.jit._unwrap_optional(hidden_fw) # type: ignore[assignment]
if self.batch_first:
result.transpose_(0, 1)
return result, (h, c)
@classmethod
def from_float(cls, other, layer_idx=0, qconfig=None, **kwargs):
r"""
There is no FP equivalent of this class. This function is here just to
mimic the behavior of the `prepare` within the `torch.ao.quantization`
flow.
"""
assert hasattr(other, 'qconfig') or (qconfig is not None)
input_size = kwargs.get('input_size', other.input_size)
hidden_size = kwargs.get('hidden_size', other.hidden_size)
bias = kwargs.get('bias', other.bias)
batch_first = kwargs.get('batch_first', other.batch_first)
bidirectional = kwargs.get('bidirectional', other.bidirectional)
layer = cls(input_size, hidden_size, bias, batch_first, bidirectional)
layer.qconfig = getattr(other, 'qconfig', qconfig)
wi = getattr(other, f'weight_ih_l{layer_idx}')
wh = getattr(other, f'weight_hh_l{layer_idx}')
bi = getattr(other, f'bias_ih_l{layer_idx}', None)
bh = getattr(other, f'bias_hh_l{layer_idx}', None)
layer.layer_fw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)
if other.bidirectional:
wi = getattr(other, f'weight_ih_l{layer_idx}_reverse')
wh = getattr(other, f'weight_hh_l{layer_idx}_reverse')
bi = getattr(other, f'bias_ih_l{layer_idx}_reverse', None)
bh = getattr(other, f'bias_hh_l{layer_idx}_reverse', None)
layer.layer_bw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)
return layer
class LSTM(torch.nn.Module):
r"""A quantizable long short-term memory (LSTM).
For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`
Attributes:
layers : instances of the `_LSTMLayer`
.. note::
To access the weights and biases, you need to access them per layer.
See examples below.
Examples::
>>> import torch.nn.quantizable as nnqa
>>> rnn = nnqa.LSTM(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> c0 = torch.randn(2, 3, 20)
>>> output, (hn, cn) = rnn(input, (h0, c0))
>>> # To get the weights:
>>> # xdoctest: +SKIP
>>> print(rnn.layers[0].weight_ih)
tensor([[...]])
>>> print(rnn.layers[0].weight_hh)
AssertionError: There is no reverse path in the non-bidirectional layer
"""
_FLOAT_MODULE = torch.nn.LSTM
def __init__(self, input_size: int, hidden_size: int,
num_layers: int = 1, bias: bool = True,
batch_first: bool = False, dropout: float = 0.,
bidirectional: bool = False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = float(dropout)
self.bidirectional = bidirectional
self.training = False # We don't want to train using this module
num_directions = 2 if bidirectional else 1
if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
isinstance(dropout, bool):
raise ValueError("dropout should be a number in range [0, 1] "
"representing the probability of an element being "
"zeroed")
if dropout > 0:
warnings.warn("dropout option for quantizable LSTM is ignored. "
"If you are training, please, use nn.LSTM version "
"followed by `prepare` step.")
if num_layers == 1:
warnings.warn("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
"num_layers greater than 1, but got dropout={} "
"and num_layers={}".format(dropout, num_layers))
layers = [_LSTMLayer(self.input_size, self.hidden_size,
self.bias, batch_first=False,
bidirectional=self.bidirectional, **factory_kwargs)]
for layer in range(1, num_layers):
layers.append(_LSTMLayer(self.hidden_size, self.hidden_size,
self.bias, batch_first=False,
bidirectional=self.bidirectional,
**factory_kwargs))
self.layers = torch.nn.ModuleList(layers)
def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
if self.batch_first:
x = x.transpose(0, 1)
max_batch_size = x.size(1)
num_directions = 2 if self.bidirectional else 1
if hidden is None:
zeros = torch.zeros(num_directions, max_batch_size,
self.hidden_size, dtype=torch.float,
device=x.device)
zeros.squeeze_(0)
if x.is_quantized:
zeros = torch.quantize_per_tensor(zeros, scale=1.0,
zero_point=0, dtype=x.dtype)
hxcx = [(zeros, zeros) for _ in range(self.num_layers)]
else:
hidden_non_opt = torch.jit._unwrap_optional(hidden)
if isinstance(hidden_non_opt[0], Tensor):
hx = hidden_non_opt[0].reshape(self.num_layers, num_directions,
max_batch_size,
self.hidden_size).unbind(0)
cx = hidden_non_opt[1].reshape(self.num_layers, num_directions,
max_batch_size,
self.hidden_size).unbind(0)
hxcx = [(hx[idx].squeeze_(0), cx[idx].squeeze_(0)) for idx in range(self.num_layers)]
else:
hxcx = hidden_non_opt
hx_list = []
cx_list = []
for idx, layer in enumerate(self.layers):
x, (h, c) = layer(x, hxcx[idx])
hx_list.append(torch.jit._unwrap_optional(h))
cx_list.append(torch.jit._unwrap_optional(c))
hx_tensor = torch.stack(hx_list)
cx_tensor = torch.stack(cx_list)
# We are creating another dimension for bidirectional case
# need to collapse it
hx_tensor = hx_tensor.reshape(-1, hx_tensor.shape[-2], hx_tensor.shape[-1])
cx_tensor = cx_tensor.reshape(-1, cx_tensor.shape[-2], cx_tensor.shape[-1])
if self.batch_first:
x = x.transpose(0, 1)
return x, (hx_tensor, cx_tensor)
def _get_name(self):
return 'QuantizableLSTM'
@classmethod
def from_float(cls, other, qconfig=None):
assert isinstance(other, cls._FLOAT_MODULE)
assert (hasattr(other, 'qconfig') or qconfig)
observed = cls(other.input_size, other.hidden_size, other.num_layers,
other.bias, other.batch_first, other.dropout,
other.bidirectional)
observed.qconfig = getattr(other, 'qconfig', qconfig)
for idx in range(other.num_layers):
observed.layers[idx] = _LSTMLayer.from_float(other, idx, qconfig,
batch_first=False)
observed.eval()
observed = torch.ao.quantization.prepare(observed, inplace=True)
return observed
@classmethod
def from_observed(cls, other):
# The whole flow is float -> observed -> quantized
# This class does float -> observed only
raise NotImplementedError("It looks like you are trying to convert a "
"non-quantizable LSTM module. Please, see "
"the examples on quantizable LSTMs.")
| pytorch-master | torch/nn/quantizable/modules/rnn.py |
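A hedged sketch of the float -> observed step for the quantizable LSTM above: from_float rebuilds the stacked nn.LSTM out of observable _LSTMLayer / LSTMCell blocks and inserts observers, after which a float forward pass calibrates them. Sizes and qconfig are illustrative assumptions.
import torch
import torch.nn.quantizable as nnqa

float_lstm = torch.nn.LSTM(input_size=10, hidden_size=20, num_layers=2).eval()
float_lstm.qconfig = torch.ao.quantization.default_qconfig

observed = nnqa.LSTM.from_float(float_lstm)  # observers inserted by prepare()
x = torch.randn(5, 3, 10)                    # (seq_len, batch, input_size)
out, (h, c) = observed(x)                    # calibration pass in float
print(out.shape, h.shape, c.shape)           # (5, 3, 20) (2, 3, 20) (2, 3, 20)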
from .modules import * # noqa: F403
| pytorch-master | torch/nn/intrinsic/__init__.py |
from .modules import * # noqa: F403
| pytorch-master | torch/nn/intrinsic/qat/__init__.py |
import math
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.qat as nnqat
import torch.nn.functional as F
from torch.nn import init
from torch.nn.utils import fuse_conv_bn_weights
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.parameter import Parameter
from typing import TypeVar
__all__ = ['ConvBn1d', 'ConvBnReLU1d', 'ConvReLU1d', 'ConvBn2d', 'ConvBnReLU2d', 'ConvReLU2d', 'ConvBn3d',
'ConvBnReLU3d', 'ConvReLU3d', 'update_bn_stats', 'freeze_bn_stats']
_BN_CLASS_MAP = {
1: nn.BatchNorm1d,
2: nn.BatchNorm2d,
3: nn.BatchNorm3d,
}
MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
class _ConvBnNd(nn.modules.conv._ConvNd, nni._FusedModule):
_version = 2
_FLOAT_MODULE = MOD
def __init__(self,
# ConvNd args
in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups,
bias,
padding_mode,
# BatchNormNd args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None,
dim=2):
nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size,
stride, padding, dilation, transposed,
output_padding, groups, False, padding_mode)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.freeze_bn = freeze_bn if self.training else True
self.bn = _BN_CLASS_MAP[dim](out_channels, eps, momentum, True, True)
self.weight_fake_quant = self.qconfig.weight()
if bias:
self.bias = Parameter(torch.empty(out_channels))
else:
self.register_parameter('bias', None)
self.reset_bn_parameters()
# this needs to be called after reset_bn_parameters,
# as they modify the same state
if self.training:
if freeze_bn:
self.freeze_bn_stats()
else:
self.update_bn_stats()
else:
self.freeze_bn_stats()
def reset_running_stats(self):
self.bn.reset_running_stats()
def reset_bn_parameters(self):
self.bn.reset_running_stats()
init.uniform_(self.bn.weight)
init.zeros_(self.bn.bias)
# note: below is actually for conv, not BN
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def reset_parameters(self):
super(_ConvBnNd, self).reset_parameters()
def update_bn_stats(self):
self.freeze_bn = False
self.bn.training = True
return self
def freeze_bn_stats(self):
self.freeze_bn = True
self.bn.training = False
return self
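    # The `_forward` below applies the same batch-norm folding trick that is
    # spelled out in the comment inside torch.nn.intrinsic.qat.LinearBn1d.forward
    # (see https://arxiv.org/abs/1806.08342): the conv weight is scaled by the
    # BN statistics before fake-quantization and the scaling is undone
    # afterwards, so the observer sees BN-folded weights while the numerics
    # still match an unfused Conv -> BN pair. A rough sketch of the algebra
    # (illustrative only, ignoring fake-quant):
    #
    #   running_std  = sqrt(bn.running_var + bn.eps)
    #   scale_factor = bn.weight / running_std                    # shape (out_channels,)
    #   y            = conv(x, w * scale_factor) / scale_factor   # == conv(x, w)
    #   out          = bn(y + conv_bias)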
def _forward(self, input):
assert self.bn.running_var is not None
running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
scale_factor = self.bn.weight / running_std
weight_shape = [1] * len(self.weight.shape)
weight_shape[0] = -1
bias_shape = [1] * len(self.weight.shape)
bias_shape[1] = -1
scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape))
# using zero bias here since the bias for original conv
# will be added later
if self.bias is not None:
zero_bias = torch.zeros_like(self.bias)
else:
zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device)
conv = self._conv_forward(input, scaled_weight, zero_bias)
conv_orig = conv / scale_factor.reshape(bias_shape)
if self.bias is not None:
conv_orig = conv_orig + self.bias.reshape(bias_shape)
conv = self.bn(conv_orig)
return conv
def extra_repr(self):
# TODO(jerryzh): extend
return super(_ConvBnNd, self).extra_repr()
def forward(self, input):
return self._forward(input)
def train(self, mode=True):
"""
        BatchNorm's training behavior is controlled by the self.training flag. Prevent
        changing it if BN is frozen, so that calling `model.train()` on a model
        with a frozen BN behaves properly.
"""
self.training = mode
if not self.freeze_bn:
for module in self.children():
module.train(mode)
return self
# ===== Serialization version history =====
#
# Version 1/None
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- gamma : Tensor
# |--- beta : Tensor
# |--- running_mean : Tensor
# |--- running_var : Tensor
# |--- num_batches_tracked : Tensor
#
# Version 2
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- bn : Module
# |--- weight : Tensor (moved from v1.self.gamma)
# |--- bias : Tensor (moved from v1.self.beta)
# |--- running_mean : Tensor (moved from v1.self.running_mean)
# |--- running_var : Tensor (moved from v1.self.running_var)
# |--- num_batches_tracked : Tensor (moved from v1.self.num_batches_tracked)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
if version is None or version == 1:
# BN related parameters and buffers were moved into the BN module for v2
v2_to_v1_names = {
'bn.weight': 'gamma',
'bn.bias': 'beta',
'bn.running_mean': 'running_mean',
'bn.running_var': 'running_var',
'bn.num_batches_tracked': 'num_batches_tracked',
}
for v2_name, v1_name in v2_to_v1_names.items():
if prefix + v1_name in state_dict:
state_dict[prefix + v2_name] = state_dict[prefix + v1_name]
state_dict.pop(prefix + v1_name)
elif prefix + v2_name in state_dict:
# there was a brief period where forward compatibility
# for this module was broken (between
# https://github.com/pytorch/pytorch/pull/38478
# and https://github.com/pytorch/pytorch/pull/38820)
# and modules emitted the v2 state_dict format while
# specifying that version == 1. This patches the forward
# compatibility issue by allowing the v2 style entries to
# be used.
pass
elif strict:
missing_keys.append(prefix + v2_name)
super(_ConvBnNd, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
@classmethod
def from_float(cls, mod):
r"""Create a qat module from a float module or qparams_dict
Args: `mod` a float module, either produced by torch.ao.quantization utilities
or directly from user
"""
# The ignore is because _FLOAT_MODULE is a TypeVar here where the bound
# has no __name__ (code is fine though)
assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \
cls._FLOAT_MODULE.__name__ # type: ignore[attr-defined]
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
assert mod.qconfig, 'Input float module must have a valid qconfig'
qconfig = mod.qconfig
conv, bn = mod[0], mod[1]
qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size,
conv.stride, conv.padding, conv.dilation,
conv.groups, conv.bias is not None,
conv.padding_mode,
bn.eps, bn.momentum,
False,
qconfig)
qat_convbn.weight = conv.weight
qat_convbn.bias = conv.bias
qat_convbn.bn.weight = bn.weight
qat_convbn.bn.bias = bn.bias
qat_convbn.bn.running_mean = bn.running_mean
qat_convbn.bn.running_var = bn.running_var
# mypy error: Cannot determine type of 'num_batches_tracked'
qat_convbn.bn.num_batches_tracked = bn.num_batches_tracked # type: ignore[has-type]
return qat_convbn
def to_float(self):
cls = type(self)
conv = cls._FLOAT_CONV_MODULE( # type: ignore[attr-defined]
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
self.dilation,
self.groups,
self.bias is not None,
self.padding_mode)
conv.weight = torch.nn.Parameter(self.weight.detach())
if self.bias is not None:
conv.bias = torch.nn.Parameter(self.bias.detach())
if cls._FLOAT_BN_MODULE: # type: ignore[attr-defined]
# fuse bn into conv
conv.weight, conv.bias = fuse_conv_bn_weights(
conv.weight,
conv.bias,
self.bn.running_mean,
self.bn.running_var,
self.bn.eps,
self.bn.weight,
self.bn.bias
)
if cls._FLOAT_RELU_MODULE: # type: ignore[attr-defined]
modules = []
modules.append(conv)
relu = cls._FLOAT_RELU_MODULE() # type: ignore[attr-defined]
modules.append(relu)
conv_relu = cls._FUSED_FLOAT_MODULE(*modules) # type: ignore[attr-defined]
conv_relu.train(self.training)
return conv_relu
else:
conv.train(self.training)
return conv
class ConvBn1d(_ConvBnNd, nn.Conv1d):
r"""
A ConvBn1d module is a module fused from Conv1d and BatchNorm1d,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv1d` and
:class:`torch.nn.BatchNorm1d`.
Similar to :class:`torch.nn.Conv1d`, with FakeQuantize modules initialized
to default.
Attributes:
        freeze_bn: whether the BatchNorm running statistics are frozen during training
weight_fake_quant: fake quant module for weight
"""
_FLOAT_BN_MODULE = nn.BatchNorm1d
_FLOAT_RELU_MODULE = None
_FLOAT_MODULE = nni.ConvBn1d
_FLOAT_CONV_MODULE = nn.Conv1d
def __init__(self,
# Conv1d args
in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm1d args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None):
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
_ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, False, _single(0), groups, bias, padding_mode,
eps, momentum, freeze_bn, qconfig, dim=1)
class ConvBnReLU1d(ConvBn1d):
r"""
A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv1d` and
:class:`torch.nn.BatchNorm1d` and :class:`torch.nn.ReLU`.
Similar to `torch.nn.Conv1d`, with FakeQuantize modules initialized to
default.
Attributes:
weight_fake_quant: fake quant module for weight
"""
# base class defines _FLOAT_MODULE as "ConvBn1d"
_FLOAT_MODULE = nni.ConvBnReLU1d # type: ignore[assignment]
_FLOAT_CONV_MODULE = nn.Conv1d
_FLOAT_BN_MODULE = nn.BatchNorm1d
_FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
# module class after fusing bn into conv
_FUSED_FLOAT_MODULE = nni.ConvReLU1d
def __init__(self,
# Conv1d args
in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm1d args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None):
super().__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias,
padding_mode, eps, momentum,
freeze_bn,
qconfig)
def forward(self, input):
return F.relu(ConvBn1d._forward(self, input))
@classmethod
def from_float(cls, mod):
return super(ConvBnReLU1d, cls).from_float(mod)
class ConvReLU1d(nnqat.Conv1d, nni._FusedModule):
r"""A ConvReLU1d module is a fused module of Conv1d and ReLU, attached with
    FakeQuantize modules for weight, used in
    quantization aware training.
    We combined the interface of :class:`~torch.nn.Conv1d` and
    :class:`~torch.nn.ReLU`.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nni.ConvReLU1d
_FLOAT_CONV_MODULE = nn.Conv1d
_FLOAT_BN_MODULE = None
_FLOAT_RELU_MODULE = nn.ReLU
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=True, padding_mode='zeros',
qconfig=None):
super(ConvReLU1d, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias, padding_mode=padding_mode,
qconfig=qconfig)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.weight_fake_quant = self.qconfig.weight()
def forward(self, input):
return F.relu(
self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias))
@classmethod
def from_float(cls, mod):
return super(ConvReLU1d, cls).from_float(mod)
class ConvBn2d(_ConvBnNd, nn.Conv2d):
r"""
A ConvBn2d module is a module fused from Conv2d and BatchNorm2d,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d`.
Similar to :class:`torch.nn.Conv2d`, with FakeQuantize modules initialized
to default.
Attributes:
        freeze_bn: whether the BatchNorm running statistics are frozen during training
weight_fake_quant: fake quant module for weight
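    Examples (an illustrative sketch; assumes the default QAT qconfig from
    torch.ao.quantization is appropriate for your backend)::
        >>> # xdoctest: +SKIP
        >>> qconfig = torch.ao.quantization.get_default_qat_qconfig()
        >>> m = ConvBn2d(3, 8, 3, qconfig=qconfig)
        >>> input = torch.randn(4, 3, 32, 32)
        >>> output = m(input)
        >>> output.size()
        torch.Size([4, 8, 30, 30])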
"""
_FLOAT_MODULE = nni.ConvBn2d
_FLOAT_CONV_MODULE = nn.Conv2d
_FLOAT_BN_MODULE = nn.BatchNorm2d
_FLOAT_RELU_MODULE = None
def __init__(self,
# ConvNd args
in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm2d args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
_ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, False, _pair(0), groups, bias, padding_mode,
eps, momentum, freeze_bn, qconfig, dim=2)
class ConvBnReLU2d(ConvBn2d):
r"""
A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d` and :class:`torch.nn.ReLU`.
Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
default.
Attributes:
weight_fake_quant: fake quant module for weight
"""
# base class defines _FLOAT_MODULE as "ConvBn2d"
_FLOAT_MODULE = nni.ConvBnReLU2d # type: ignore[assignment]
_FLOAT_CONV_MODULE = nn.Conv2d
_FLOAT_BN_MODULE = nn.BatchNorm2d
_FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
# module class after fusing bn into conv
_FUSED_FLOAT_MODULE = nni.ConvReLU2d
def __init__(self,
# Conv2d args
in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm2d args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None):
super(ConvBnReLU2d, self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias,
padding_mode, eps, momentum,
freeze_bn,
qconfig)
def forward(self, input):
return F.relu(ConvBn2d._forward(self, input))
@classmethod
def from_float(cls, mod):
return super(ConvBnReLU2d, cls).from_float(mod)
class ConvReLU2d(nnqat.Conv2d, nni._FusedModule):
r"""A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with
    FakeQuantize modules for weight, used in
    quantization aware training.
    We combined the interface of :class:`~torch.nn.Conv2d` and
    :class:`~torch.nn.ReLU`.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nni.ConvReLU2d
_FLOAT_CONV_MODULE = nn.Conv2d
_FLOAT_BN_MODULE = None
_FLOAT_RELU_MODULE = nn.ReLU
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=True, padding_mode='zeros',
qconfig=None):
super(ConvReLU2d, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias, padding_mode=padding_mode,
qconfig=qconfig)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.weight_fake_quant = self.qconfig.weight()
def forward(self, input):
return F.relu(
self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias))
@classmethod
def from_float(cls, mod):
return super(ConvReLU2d, cls).from_float(mod)
class ConvBn3d(_ConvBnNd, nn.Conv3d):
r"""
A ConvBn3d module is a module fused from Conv3d and BatchNorm3d,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv3d` and
:class:`torch.nn.BatchNorm3d`.
Similar to :class:`torch.nn.Conv3d`, with FakeQuantize modules initialized
to default.
Attributes:
        freeze_bn: whether the BatchNorm running statistics are frozen during training
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nni.ConvBn3d
_FLOAT_CONV_MODULE = nn.Conv3d
_FLOAT_BN_MODULE = nn.BatchNorm3d
_FLOAT_RELU_MODULE = None
def __init__(
self,
# ConvNd args
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=None,
padding_mode="zeros",
# BatchNorm3d args
# num_features: out_channels
eps=1e-05,
momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None,
):
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
_ConvBnNd.__init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
False,
_triple(0),
groups,
bias,
padding_mode,
eps,
momentum,
freeze_bn,
qconfig,
dim=3,
)
class ConvBnReLU3d(ConvBn3d):
r"""
A ConvBnReLU3d module is a module fused from Conv3d, BatchNorm3d and ReLU,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv3d` and
:class:`torch.nn.BatchNorm3d` and :class:`torch.nn.ReLU`.
Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
default.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nni.ConvBnReLU3d # type: ignore[assignment]
_FLOAT_CONV_MODULE = nn.Conv3d
_FLOAT_BN_MODULE = nn.BatchNorm3d
_FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
# module class after fusing bn into conv
_FUSED_FLOAT_MODULE = nni.ConvReLU3d
def __init__(
self,
# Conv3d args
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=None,
padding_mode="zeros",
# BatchNorm3d args
# num_features: out_channels
eps=1e-05,
momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None,
):
super(ConvBnReLU3d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
padding_mode,
eps,
momentum,
freeze_bn,
qconfig,
)
def forward(self, input):
return F.relu(ConvBn3d._forward(self, input))
@classmethod
def from_float(cls, mod):
return super(ConvBnReLU3d, cls).from_float(mod)
class ConvReLU3d(nnqat.Conv3d, nni._FusedModule):
r"""A ConvReLU3d module is a fused module of Conv3d and ReLU, attached with
    FakeQuantize modules for weight, used in
    quantization aware training.
    We combined the interface of :class:`~torch.nn.Conv3d` and
    :class:`~torch.nn.ReLU`.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nni.ConvReLU3d
_FLOAT_CONV_MODULE = nn.Conv3d
_FLOAT_BN_MODULE = None
_FLOAT_RELU_MODULE = nn.ReLU
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
qconfig=None,
):
super(ConvReLU3d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode=padding_mode,
qconfig=qconfig,
)
assert qconfig, "qconfig must be provided for QAT module"
self.qconfig = qconfig
self.weight_fake_quant = self.qconfig.weight()
def forward(self, input):
return F.relu(
self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
)
@classmethod
def from_float(cls, mod):
return super(ConvReLU3d, cls).from_float(mod)
def update_bn_stats(mod):
if type(mod) in set(
[ConvBnReLU1d, ConvBnReLU2d, ConvBnReLU3d, ConvBn1d, ConvBn2d, ConvBn3d]
):
mod.update_bn_stats()
def freeze_bn_stats(mod):
if type(mod) in set(
[ConvBnReLU1d, ConvBnReLU2d, ConvBnReLU3d, ConvBn1d, ConvBn2d, ConvBn3d]
):
mod.freeze_bn_stats()
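# Usage sketch for the two helpers above (illustrative; assumes `qat_model` is
# a model already prepared for QAT that contains the fused ConvBn*/ConvBnReLU*
# modules defined in this file):
#
#   >>> # xdoctest: +SKIP
#   >>> qat_model.apply(torch.nn.intrinsic.qat.update_bn_stats)  # default behaviour
#   >>> qat_model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)  # e.g. freeze BN late in QAT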
| pytorch-master | torch/nn/intrinsic/qat/modules/conv_fused.py |
from .linear_relu import LinearReLU
from .linear_fused import LinearBn1d
from .conv_fused import (
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
update_bn_stats,
freeze_bn_stats,
)
__all__ = [
"LinearReLU",
"LinearBn1d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"ConvBn1d",
"ConvBn2d",
"ConvBn3d",
"ConvBnReLU1d",
"ConvBnReLU2d",
"ConvBnReLU3d",
"update_bn_stats",
"freeze_bn_stats",
]
| pytorch-master | torch/nn/intrinsic/qat/modules/__init__.py |
import torch
import torch.nn.qat as nnqat
import torch.nn.intrinsic as nni
import torch.nn.functional as F
class LinearReLU(nnqat.Linear, nni._FusedModule):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for weight, used in
quantization aware training.
We adopt the same interface as :class:`torch.nn.Linear`.
Similar to `torch.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
default.
Attributes:
        weight_fake_quant: fake quant module for weight
Examples::
>>> # xdoctest: +SKIP
        >>> m = nn.intrinsic.qat.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = nni.LinearReLU
def __init__(self, in_features, out_features, bias=True,
qconfig=None):
super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig)
def forward(self, input):
return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))
@classmethod
def from_float(cls, mod):
return super(LinearReLU, cls).from_float(mod)
def to_float(self):
linear = torch.nn.Linear(self.in_features, self.out_features, self.bias is not None)
linear.weight = torch.nn.Parameter(self.weight.detach())
if self.bias is not None:
linear.bias = torch.nn.Parameter(self.bias.detach())
relu = torch.nn.ReLU()
return torch.nn.intrinsic.LinearReLU(linear, relu)
| pytorch-master | torch/nn/intrinsic/qat/modules/linear_relu.py |
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.nn.utils.fusion import fuse_linear_bn_weights
class LinearBn1d(nn.modules.linear.Linear, nni._FusedModule):
r"""
A LinearBn1d module is a module fused from Linear and BatchNorm1d, attached
with FakeQuantize modules for weight, used in quantization aware training.
We combined the interface of :class:`torch.nn.Linear` and
    :class:`torch.nn.BatchNorm1d`.
Similar to :class:`torch.nn.Linear`, with FakeQuantize modules initialized
to default.
Attributes:
        freeze_bn: whether the BatchNorm running statistics are frozen during training
weight_fake_quant: fake quant module for weight
"""
def __init__(self,
# Linear args
in_features, out_features, bias=True,
# BatchNorm1d args
# num_features: out_features
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None):
nn.modules.linear.Linear.__init__(self, in_features, out_features, bias)
        assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.freeze_bn = freeze_bn if self.training else True
self.bn = nn.BatchNorm1d(out_features, eps, momentum, True, True)
self.weight_fake_quant = self.qconfig.weight()
if bias:
self.bias = Parameter(torch.empty(out_features))
else:
self.register_parameter('bias', None)
self.reset_bn_parameters()
# this needs to be called after reset_bn_parameters,
# as they modify the same state
if self.training:
if freeze_bn:
self.freeze_bn_stats()
else:
self.update_bn_stats()
else:
self.freeze_bn_stats()
def reset_running_stats(self):
self.bn.reset_running_stats()
def reset_bn_parameters(self):
self.bn.reset_running_stats()
init.uniform_(self.bn.weight)
init.zeros_(self.bn.bias)
def reset_parameters(self):
super(LinearBn1d, self).reset_parameters()
def update_bn_stats(self):
self.freeze_bn = False
self.bn.training = True
return self
def freeze_bn_stats(self):
self.freeze_bn = True
self.bn.training = False
return self
def forward(self, input):
assert self.bn.running_var is not None
# Scale the linear weights by BN's running statistics to reduce
# weight jitter, see https://arxiv.org/pdf/1806.08342.pdf, page 18
# for motivation.
#
# Instead of
#
# x1 = F.linear(x0, fq(w), b)
# x2 = self.bn(x1)
#
# We have
#
# # scale the weight by previous batch's running statistics
# scale_factor = bn.w / bn.running_std_from_prev_batch
# # do the linear transformation without bias
# x1_scaled = F.linear(x0, fq(w * scale_factor), 0)
# # reverse the scaling and add original bias
# x1_orig = x1_scaled / scale_factor + b
# x2 = self.bn(x1_orig)
running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
scale_factor = self.bn.weight / running_std
weight_shape = [1] * len(self.weight.shape)
weight_shape[0] = -1
bias_shape = [1] * len(self.weight.shape)
bias_shape[1] = -1
scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape))
if self.bias is not None:
zero_bias = torch.zeros_like(self.bias)
else:
zero_bias = torch.zeros(self.out_features, device=scaled_weight.device)
linear_out = F.linear(input, scaled_weight, zero_bias)
linear_out_orig = linear_out / scale_factor.reshape(bias_shape)
if self.bias is not None:
linear_out_orig = linear_out_orig + self.bias.reshape(bias_shape)
bn_out = self.bn(linear_out_orig)
return bn_out
def train(self, mode=True):
"""
        BatchNorm's training behavior is controlled by the self.training flag. Prevent
        changing it if BN is frozen, so that calling `model.train()` on a model
        with a frozen BN behaves properly.
"""
self.training = mode
if not self.freeze_bn:
for module in self.children():
module.train(mode)
return self
@classmethod
def from_float(cls, mod):
r"""Create a qat module from a float module or qparams_dict
        Args: `mod` a float module, either produced by torch.ao.quantization
              utilities or directly from the user
"""
assert type(mod) == nni.LinearBn1d, 'qat.' + cls.__name__ + \
'.from_float only works for ' + nni.LinearBn1d.__name__
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
        assert mod.qconfig, 'Input float module must have a valid qconfig'
qconfig = mod.qconfig
linear, bn = mod[0], mod[1]
qat_linearbn = cls(linear.in_features, linear.out_features, linear.bias is not None,
bn.eps, bn.momentum,
False, qconfig)
qat_linearbn.weight = linear.weight
qat_linearbn.bias = linear.bias
qat_linearbn.bn.weight = bn.weight
qat_linearbn.bn.bias = bn.bias
qat_linearbn.bn.running_mean = bn.running_mean
qat_linearbn.bn.running_var = bn.running_var
qat_linearbn.bn.num_batches_tracked = bn.num_batches_tracked
return qat_linearbn
def to_float(self):
linear = torch.nn.Linear(self.in_features, self.out_features)
linear.weight, linear.bias = fuse_linear_bn_weights(
self.weight,
self.bias,
self.bn.running_mean,
self.bn.running_var,
self.bn.eps,
self.bn.weight,
self.bn.bias)
return linear
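# Round-trip usage sketch (illustrative; `nn`/`nni` are the imports at the top
# of this file and the qconfig choice is only an example):
#
#   >>> # xdoctest: +SKIP
#   >>> float_mod = nni.LinearBn1d(nn.Linear(20, 30), nn.BatchNorm1d(30))
#   >>> float_mod.qconfig = torch.ao.quantization.get_default_qat_qconfig()
#   >>> qat_mod = LinearBn1d.from_float(float_mod)  # QAT module defined above
#   >>> restored = qat_mod.to_float()               # plain Linear with BN folded in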
| pytorch-master | torch/nn/intrinsic/qat/modules/linear_fused.py |
from .modules import * # noqa: F403
| pytorch-master | torch/nn/intrinsic/quantized/__init__.py |
from .modules import * # noqa: F403
| pytorch-master | torch/nn/intrinsic/quantized/dynamic/__init__.py |
import torch
from .linear_relu import LinearReLU
__all__ = [
'LinearReLU',
]
| pytorch-master | torch/nn/intrinsic/quantized/dynamic/modules/__init__.py |
import torch
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
class LinearReLU(nnqd.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules that can be used
for dynamic quantization.
    Supports both FP16 and INT8 quantization.
We adopt the same interface as :class:`torch.nn.quantized.dynamic.Linear`.
Attributes:
Same as torch.nn.quantized.dynamic.Linear
Examples::
>>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> # xdoctest: +SKIP
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment]
def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
super().__init__(in_features, out_features, bias, dtype)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self._packed_params.dtype == torch.qint8:
            # TODO check if we should set reduce_range = True by default here
Y = torch.ops.quantized.linear_relu_dynamic(
x, self._packed_params._packed_params, reduce_range=True)
elif self._packed_params.dtype == torch.float16:
Y = torch.ops.quantized.linear_relu_dynamic_fp16(
x, self._packed_params._packed_params)
else:
raise RuntimeError('Unsupported dtype on dynamic quantized linear relu!')
return Y.to(x.dtype)
def _get_name(self):
return 'DynamicQuantizedLinearReLU'
@classmethod
def from_float(cls, mod):
return super(LinearReLU, cls).from_float(mod)
@classmethod
def from_reference(cls, ref_qlinear_relu):
return super().from_reference(ref_qlinear_relu[0])
| pytorch-master | torch/nn/intrinsic/quantized/dynamic/modules/linear_relu.py |
import torch
import torch.nn.intrinsic
import torch.nn.intrinsic.qat
import torch.nn.quantized as nnq
class BNReLU2d(nnq.BatchNorm2d):
r"""
A BNReLU2d module is a fused module of BatchNorm2d and ReLU
We adopt the same interface as :class:`torch.nn.quantized.BatchNorm2d`.
Attributes:
Same as torch.nn.quantized.BatchNorm2d
"""
_FLOAT_MODULE = torch.nn.intrinsic.BNReLU2d
def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
super(BNReLU2d, self).__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
return torch.ops.quantized.batch_norm2d_relu(
input, self.weight, self.bias, self.running_mean,
self.running_var, self.eps, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedBNReLU2d'
@classmethod
def from_float(cls, mod):
# TODO: Add qat support for BNReLU2d
return super(BNReLU2d, cls).from_float(mod)
@classmethod
def from_reference(cls, bn_relu, output_scale, output_zero_point):
return super().from_reference(bn_relu[0], output_scale, output_zero_point)
class BNReLU3d(nnq.BatchNorm3d):
r"""
A BNReLU3d module is a fused module of BatchNorm3d and ReLU
We adopt the same interface as :class:`torch.nn.quantized.BatchNorm3d`.
Attributes:
Same as torch.nn.quantized.BatchNorm3d
"""
_FLOAT_MODULE = torch.nn.intrinsic.BNReLU3d
def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
super(BNReLU3d, self).__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
return torch.ops.quantized.batch_norm3d_relu(
input, self.weight, self.bias, self.running_mean,
self.running_var, self.eps, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedBNReLU3d'
@classmethod
def from_float(cls, mod):
# TODO: Add qat support for BNReLU3d
return super(BNReLU3d, cls).from_float(mod)
@classmethod
def from_reference(cls, bn_relu, output_scale, output_zero_point):
return super().from_reference(bn_relu[0], output_scale, output_zero_point)
| pytorch-master | torch/nn/intrinsic/quantized/modules/bn_relu.py |
from .linear_relu import LinearReLU
from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d
from .bn_relu import BNReLU2d, BNReLU3d
__all__ = [
'LinearReLU',
'ConvReLU1d',
'ConvReLU2d',
'ConvReLU3d',
'BNReLU2d',
'BNReLU3d',
]
| pytorch-master | torch/nn/intrinsic/quantized/modules/__init__.py |
import torch
import torch.nn.intrinsic
import torch.nn.intrinsic.qat
import torch.nn.functional as F
import torch.nn.quantized as nnq
from torch.nn.utils import fuse_conv_bn_weights
_reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding
# TODO: factor out the common parts to ConvNd
class ConvReLU1d(nnq.Conv1d):
r"""
A ConvReLU1d module is a fused module of Conv1d and ReLU
We adopt the same interface as :class:`torch.nn.quantized.Conv1d`.
Attributes:
Same as torch.nn.quantized.Conv1d
"""
_FLOAT_MODULE = torch.nn.intrinsic.ConvReLU1d # type: ignore[assignment]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros', device=None, dtype=None):
super(ConvReLU1d, self).__init__(
in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias,
padding_mode=padding_mode, device=device, dtype=dtype)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 3:
raise ValueError("Input shape must be `(N, C, L)`!")
if self.padding_mode != 'zeros':
# Padding in Conv1d is stored as (p, p), need to get (p,)
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
input = F.pad(input, _reversed_padding_repeated_twice,
mode=self.padding_mode)
return torch.ops.quantized.conv1d_relu(
input, self._packed_params, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedConvReLU1d'
@classmethod
def from_float(cls, mod):
if type(mod) == torch.nn.intrinsic.qat.ConvBnReLU1d:
mod.weight, mod.bias = fuse_conv_bn_weights(
mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
mod.bn.eps, mod.bn.weight, mod.bn.bias)
return super(ConvReLU1d, cls).from_float(mod)
@classmethod
def from_reference(cls, ref_qconv, output_scale, output_zero_point):
assert type(ref_qconv) != torch.nn.intrinsic.ConvBnReLU1d, \
"BatchNorm1d should be fused into Conv1d before converting to reference module"
return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
class ConvReLU2d(nnq.Conv2d):
r"""
A ConvReLU2d module is a fused module of Conv2d and ReLU
We adopt the same interface as :class:`torch.nn.quantized.Conv2d`.
Attributes:
Same as torch.nn.quantized.Conv2d
"""
_FLOAT_MODULE = torch.nn.intrinsic.ConvReLU2d # type: ignore[assignment]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros', device=None, dtype=None):
super(ConvReLU2d, self).__init__(
in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias,
padding_mode=padding_mode, device=device, dtype=dtype)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
if self.padding_mode != 'zeros':
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
input = F.pad(input, _reversed_padding_repeated_twice,
mode=self.padding_mode)
return torch.ops.quantized.conv2d_relu(
input, self._packed_params, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedConvReLU2d'
@classmethod
def from_float(cls, mod):
if type(mod) == torch.nn.intrinsic.qat.ConvBnReLU2d:
mod.weight, mod.bias = fuse_conv_bn_weights(
mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
mod.bn.eps, mod.bn.weight, mod.bn.bias)
return super(ConvReLU2d, cls).from_float(mod)
@classmethod
def from_reference(cls, ref_qconv, output_scale, output_zero_point):
assert type(ref_qconv) != torch.nn.intrinsic.ConvBnReLU2d, \
"BatchNorm2d should be fused into Conv2d before converting to reference module"
return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
class ConvReLU3d(nnq.Conv3d):
r"""
A ConvReLU3d module is a fused module of Conv3d and ReLU
We adopt the same interface as :class:`torch.nn.quantized.Conv3d`.
    Attributes:
        Same as torch.nn.quantized.Conv3d
"""
_FLOAT_MODULE = torch.nn.intrinsic.ConvReLU3d # type: ignore[assignment]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros', device=None, dtype=None):
assert padding_mode != 'reflect', "Conv3d does not support reflection padding"
super(ConvReLU3d, self).__init__(
in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias,
padding_mode=padding_mode, device=device, dtype=dtype)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
if self.padding_mode != 'zeros':
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
input = F.pad(input, _reversed_padding_repeated_twice,
mode=self.padding_mode)
return torch.ops.quantized.conv3d_relu(
input, self._packed_params, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedConvReLU3d'
@classmethod
def from_float(cls, mod):
if type(mod) == torch.nn.intrinsic.qat.ConvBnReLU3d:
mod.weight, mod.bias = fuse_conv_bn_weights(
mod.weight,
mod.bias,
mod.bn.running_mean,
mod.bn.running_var,
mod.bn.eps,
mod.bn.weight,
mod.bn.bias,
)
return super(ConvReLU3d, cls).from_float(mod)
@classmethod
def from_reference(cls, ref_qconv, output_scale, output_zero_point):
assert type(ref_qconv) != torch.nn.intrinsic.ConvBnReLU3d, \
"BatchNorm3d should be fused into Conv3d before converting to reference module"
return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
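# These quantized ConvReLU modules are normally produced by the eager-mode
# convert step rather than built by hand. A rough sketch of that flow
# (module names in the fuse list are placeholders, and QuantStub/DeQuantStub
# handling is omitted for brevity):
#
#   >>> # xdoctest: +SKIP
#   >>> fused = torch.ao.quantization.fuse_modules(float_model, [['conv', 'relu']])
#   >>> fused.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
#   >>> prepared = torch.ao.quantization.prepare(fused)
#   >>> prepared(calibration_batch)                          # run observers
#   >>> quantized = torch.ao.quantization.convert(prepared)  # now contains QuantizedConvReLU2d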
| pytorch-master | torch/nn/intrinsic/quantized/modules/conv_relu.py |
import torch
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
class LinearReLU(nnq.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules
We adopt the same interface as :class:`torch.nn.quantized.Linear`.
Attributes:
Same as torch.nn.quantized.Linear
Examples::
>>> # xdoctest: +SKIP
>>> m = nn.intrinsic.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = nni.LinearReLU
def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
super().__init__(in_features, out_features, bias, dtype)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.quantized.linear_relu(
x, self._packed_params._packed_params, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedLinearReLU'
@classmethod
def from_float(cls, mod):
return super(LinearReLU, cls).from_float(mod)
@classmethod
def from_reference(cls, ref_linear_relu, output_scale, output_zero_point):
return super().from_reference(ref_linear_relu[0], output_scale, output_zero_point)
| pytorch-master | torch/nn/intrinsic/quantized/modules/linear_relu.py |
import torch
from torch.nn import Conv1d, Conv2d, Conv3d, ReLU, Linear, BatchNorm1d, BatchNorm2d, BatchNorm3d
from torch.nn.utils.parametrize import type_before_parametrizations
__all__ = ['ConvReLU1d', 'ConvReLU2d', 'ConvReLU3d', 'LinearReLU', 'ConvBn1d', 'ConvBn2d',
'ConvBnReLU1d', 'ConvBnReLU2d', 'ConvBn3d', 'ConvBnReLU3d', 'BNReLU2d', 'BNReLU3d',
'LinearBn1d']
# Used for identifying intrinsic modules used in quantization
class _FusedModule(torch.nn.Sequential):
pass
class ConvReLU1d(_FusedModule):
r"""This is a sequential container which calls the Conv1d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(relu) == ReLU, \
            'Incorrect types for input modules {} {}'.format(
type_before_parametrizations(conv), type_before_parametrizations(relu))
super().__init__(conv, relu)
class ConvReLU2d(_FusedModule):
r"""This is a sequential container which calls the Conv2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(relu) == ReLU, \
            'Incorrect types for input modules {} {}'.format(
type_before_parametrizations(conv), type_before_parametrizations(relu))
super().__init__(conv, relu)
class ConvReLU3d(_FusedModule):
r"""This is a sequential container which calls the Conv3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(relu) == ReLU, \
            'Incorrect types for input modules {} {}'.format(
type_before_parametrizations(conv), type_before_parametrizations(relu))
super().__init__(conv, relu)
class LinearReLU(_FusedModule):
r"""This is a sequential container which calls the Linear and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, linear, relu):
assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(relu) == ReLU, \
            'Incorrect types for input modules {} {}'.format(
type_before_parametrizations(linear), type_before_parametrizations(relu))
super().__init__(linear, relu)
class ConvBn1d(_FusedModule):
r"""This is a sequential container which calls the Conv 1d and Batch Norm 1d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d, \
            'Incorrect types for input modules {} {}'.format(
type_before_parametrizations(conv), type_before_parametrizations(bn))
super().__init__(conv, bn)
class ConvBn2d(_FusedModule):
r"""This is a sequential container which calls the Conv 2d and Batch Norm 2d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d, \
            'Incorrect types for input modules {} {}'.format(
type_before_parametrizations(conv), type_before_parametrizations(bn))
super(ConvBn2d, self).__init__(conv, bn)
class ConvBnReLU1d(_FusedModule):
r"""This is a sequential container which calls the Conv 1d, Batch Norm 1d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d and \
            type_before_parametrizations(relu) == ReLU, 'Incorrect types for input modules {} {} {}' \
.format(type_before_parametrizations(conv), type_before_parametrizations(bn), type_before_parametrizations(relu))
super().__init__(conv, bn, relu)
class ConvBnReLU2d(_FusedModule):
r"""This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d and \
            type_before_parametrizations(relu) == ReLU, 'Incorrect types for input modules {} {} {}' \
.format(type_before_parametrizations(conv), type_before_parametrizations(bn), type_before_parametrizations(relu))
super().__init__(conv, bn, relu)
class ConvBn3d(_FusedModule):
r"""This is a sequential container which calls the Conv 3d and Batch Norm 3d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d, \
            'Incorrect types for input modules {} {}'.format(
type_before_parametrizations(conv), type_before_parametrizations(bn))
super().__init__(conv, bn)
class ConvBnReLU3d(_FusedModule):
r"""This is a sequential container which calls the Conv 3d, Batch Norm 3d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d and \
            type_before_parametrizations(relu) == ReLU, 'Incorrect types for input modules {} {} {}' \
.format(type_before_parametrizations(conv), type_before_parametrizations(bn), type_before_parametrizations(relu))
super().__init__(conv, bn, relu)
class BNReLU2d(_FusedModule):
r"""This is a sequential container which calls the BatchNorm 2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, batch_norm, relu):
assert type_before_parametrizations(batch_norm) == BatchNorm2d and type_before_parametrizations(relu) == ReLU, \
            'Incorrect types for input modules {} {}'.format(
type_before_parametrizations(batch_norm), type_before_parametrizations(relu))
super().__init__(batch_norm, relu)
class BNReLU3d(_FusedModule):
r"""This is a sequential container which calls the BatchNorm 3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, batch_norm, relu):
assert type_before_parametrizations(batch_norm) == BatchNorm3d and type_before_parametrizations(relu) == ReLU, \
            'Incorrect types for input modules {} {}'.format(
type_before_parametrizations(batch_norm), type_before_parametrizations(relu))
super().__init__(batch_norm, relu)
class LinearBn1d(_FusedModule):
r"""This is a sequential container which calls the Linear and BatchNorm1d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, linear, bn):
assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(bn) == BatchNorm1d, \
            'Incorrect types for input modules {} {}'.format(type_before_parametrizations(linear), type_before_parametrizations(bn))
super().__init__(linear, bn)
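# Construction sketch for these containers (illustrative). They can be built
# directly from the matching float modules, which is also what the eager-mode
# fusion utilities (torch.ao.quantization.fuse_modules and its QAT variant) do
# when they match Conv/BN/ReLU or Linear/ReLU patterns in a model:
#
#   >>> # xdoctest: +SKIP
#   >>> fused = ConvBnReLU2d(Conv2d(3, 8, 3), BatchNorm2d(8), ReLU())
#   >>> fused(torch.randn(1, 3, 32, 32)).shape
#   torch.Size([1, 8, 30, 30])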
| pytorch-master | torch/nn/intrinsic/modules/fused.py |
from .fused import _FusedModule
from .fused import ConvBn1d
from .fused import ConvBn2d
from .fused import ConvBn3d
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
from .fused import ConvReLU3d
from .fused import LinearReLU
from .fused import BNReLU2d
from .fused import BNReLU3d
from .fused import LinearBn1d
__all__ = [
'_FusedModule',
'ConvBn1d',
'ConvBn2d',
'ConvBn3d',
'ConvBnReLU1d',
'ConvBnReLU2d',
'ConvBnReLU3d',
'ConvReLU1d',
'ConvReLU2d',
'ConvReLU3d',
'LinearReLU',
'BNReLU2d',
'BNReLU3d',
'LinearBn1d',
]
| pytorch-master | torch/nn/intrinsic/modules/__init__.py |
from .module import Module
from .. import functional as F
from torch import Tensor
from typing import Optional
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t
__all__ = ['Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d']
class Upsample(Module):
r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.
The input data is assumed to be of the form
`minibatch x channels x [optional depth] x [optional height] x width`.
Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor.
The algorithms available for upsampling are nearest neighbor and linear,
bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor,
respectively.
One can either give a :attr:`scale_factor` or the target output :attr:`size` to
calculate the output size. (You cannot give both, as it is ambiguous)
Args:
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
output spatial sizes
scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
multiplier for spatial size. Has to match input size if it is a tuple.
mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
Default: ``'nearest'``
align_corners (bool, optional): if ``True``, the corner pixels of the input
and output tensors are aligned, and thus preserving the values at
those pixels. This only has effect when :attr:`mode` is
``'linear'``, ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``.
Default: ``False``
recompute_scale_factor (bool, optional): recompute the scale_factor for use in the
interpolation calculation. If `recompute_scale_factor` is ``True``, then
`scale_factor` must be passed in and `scale_factor` is used to compute the
output `size`. The computed output `size` will be used to infer new scales for
the interpolation. Note that when `scale_factor` is floating-point, it may differ
from the recomputed `scale_factor` due to rounding and precision issues.
If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will
be used directly for interpolation.
Shape:
- Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})`
or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor
.. math::
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
.. math::
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
.. warning::
With ``align_corners = True``, the linearly interpolating modes
(`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally
align the output and input pixels, and thus the output values can depend
on the input size. This was the default behavior for these modes up to
version 0.3.1. Since then, the default behavior is
``align_corners = False``. See below for concrete examples on how this
affects the outputs.
.. note::
If you want downsampling/general resizing, you should use :func:`~nn.functional.interpolate`.
Examples::
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
>>> input
tensor([[[[ 1., 2.],
[ 3., 4.]]]])
>>> m = nn.Upsample(scale_factor=2, mode='nearest')
>>> m(input)
tensor([[[[ 1., 1., 2., 2.],
[ 1., 1., 2., 2.],
[ 3., 3., 4., 4.],
[ 3., 3., 4., 4.]]]])
>>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
>>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False
>>> m(input)
tensor([[[[ 1.0000, 1.2500, 1.7500, 2.0000],
[ 1.5000, 1.7500, 2.2500, 2.5000],
[ 2.5000, 2.7500, 3.2500, 3.5000],
[ 3.0000, 3.2500, 3.7500, 4.0000]]]])
>>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
>>> m(input)
tensor([[[[ 1.0000, 1.3333, 1.6667, 2.0000],
[ 1.6667, 2.0000, 2.3333, 2.6667],
[ 2.3333, 2.6667, 3.0000, 3.3333],
[ 3.0000, 3.3333, 3.6667, 4.0000]]]])
>>> # Try scaling the same data in a larger tensor
>>>
>>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3)
>>> input_3x3[:, :, :2, :2].copy_(input)
tensor([[[[ 1., 2.],
[ 3., 4.]]]])
>>> input_3x3
tensor([[[[ 1., 2., 0.],
[ 3., 4., 0.],
[ 0., 0., 0.]]]])
>>> # xdoctest: +IGNORE_WANT("seems to fail when other tests are run in the same session")
>>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False
>>> # Notice that values in top left corner are the same with the small input (except at boundary)
>>> m(input_3x3)
tensor([[[[ 1.0000, 1.2500, 1.7500, 1.5000, 0.5000, 0.0000],
[ 1.5000, 1.7500, 2.2500, 1.8750, 0.6250, 0.0000],
[ 2.5000, 2.7500, 3.2500, 2.6250, 0.8750, 0.0000],
[ 2.2500, 2.4375, 2.8125, 2.2500, 0.7500, 0.0000],
[ 0.7500, 0.8125, 0.9375, 0.7500, 0.2500, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
>>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
>>> # Notice that values in top left corner are now changed
>>> m(input_3x3)
tensor([[[[ 1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000],
[ 1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000],
[ 2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000],
[ 2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000],
[ 1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
"""
__constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name', 'recompute_scale_factor']
name: str
size: Optional[_size_any_t]
scale_factor: Optional[_ratio_any_t]
mode: str
align_corners: Optional[bool]
recompute_scale_factor: Optional[bool]
def __init__(self, size: Optional[_size_any_t] = None, scale_factor: Optional[_ratio_any_t] = None,
mode: str = 'nearest', align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None) -> None:
super(Upsample, self).__init__()
self.name = type(self).__name__
self.size = size
if isinstance(scale_factor, tuple):
self.scale_factor = tuple(float(factor) for factor in scale_factor)
else:
self.scale_factor = float(scale_factor) if scale_factor else None
self.mode = mode
self.align_corners = align_corners
self.recompute_scale_factor = recompute_scale_factor
def forward(self, input: Tensor) -> Tensor:
return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners,
recompute_scale_factor=self.recompute_scale_factor)
def extra_repr(self) -> str:
if self.scale_factor is not None:
info = 'scale_factor=' + str(self.scale_factor)
else:
info = 'size=' + str(self.size)
info += ', mode=' + self.mode
return info
class UpsamplingNearest2d(Upsample):
r"""Applies a 2D nearest neighbor upsampling to an input signal composed of several input
channels.
To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
as it's constructor argument.
When :attr:`size` is given, it is the output size of the image `(h, w)`.
Args:
size (int or Tuple[int, int], optional): output spatial sizes
scale_factor (float or Tuple[float, float], optional): multiplier for
spatial size.
.. warning::
This class is deprecated in favor of :func:`~nn.functional.interpolate`.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
.. math::
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
.. math::
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
Examples::
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
>>> input
tensor([[[[ 1., 2.],
[ 3., 4.]]]])
>>> m = nn.UpsamplingNearest2d(scale_factor=2)
>>> m(input)
tensor([[[[ 1., 1., 2., 2.],
[ 1., 1., 2., 2.],
[ 3., 3., 4., 4.],
[ 3., 3., 4., 4.]]]])
"""
def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
super(UpsamplingNearest2d, self).__init__(size, scale_factor, mode='nearest')
class UpsamplingBilinear2d(Upsample):
r"""Applies a 2D bilinear upsampling to an input signal composed of several input
channels.
To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
    as its constructor argument.
When :attr:`size` is given, it is the output size of the image `(h, w)`.
Args:
size (int or Tuple[int, int], optional): output spatial sizes
scale_factor (float or Tuple[float, float], optional): multiplier for
spatial size.
.. warning::
This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is
equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
.. math::
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
.. math::
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
Examples::
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
>>> input
tensor([[[[ 1., 2.],
[ 3., 4.]]]])
>>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?")
>>> m = nn.UpsamplingBilinear2d(scale_factor=2)
>>> m(input)
tensor([[[[ 1.0000, 1.3333, 1.6667, 2.0000],
[ 1.6667, 2.0000, 2.3333, 2.6667],
[ 2.3333, 2.6667, 3.0000, 3.3333],
[ 3.0000, 3.3333, 3.6667, 4.0000]]]])
"""
def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
super(UpsamplingBilinear2d, self).__init__(size, scale_factor, mode='bilinear', align_corners=True)
| pytorch-master | torch/nn/modules/upsampling.py |
from .module import Module
from .. import functional as F
from torch import Tensor
__all__ = ['ChannelShuffle']
class ChannelShuffle(Module):
r"""Divide the channels in a tensor of shape :math:`(*, C , H, W)`
    into g groups and rearrange them as :math:`(*, \frac{C}{g}, g, H, W)`,
while keeping the original tensor shape.
Args:
groups (int): number of groups to divide channels in.
Examples::
>>> # xdoctest: +IGNORE_WANT("FIXME: incorrect want")
>>> channel_shuffle = nn.ChannelShuffle(2)
>>> input = torch.randn(1, 4, 2, 2)
>>> print(input)
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]],
[[9, 10],
[11, 12]],
[[13, 14],
[15, 16]],
]]
>>> output = channel_shuffle(input)
>>> print(output)
[[[[1, 2],
[3, 4]],
[[9, 10],
[11, 12]],
[[5, 6],
[7, 8]],
[[13, 14],
[15, 16]],
]]
"""
__constants__ = ['groups']
groups: int
def __init__(self, groups: int) -> None:
super(ChannelShuffle, self).__init__()
self.groups = groups
def forward(self, input: Tensor) -> Tensor:
return F.channel_shuffle(input, self.groups)
def extra_repr(self) -> str:
return 'groups={}'.format(self.groups)
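# For reference, a minimal sketch of the shuffle for a batched (N, C, H, W)
# tensor with groups ``g`` (illustrative only; ``x`` and ``g`` are placeholders
# and the real work is done by F.channel_shuffle):
#
#   >>> # xdoctest: +SKIP
#   >>> n, c, h, w = x.shape
#   >>> manual = x.view(n, g, c // g, h, w).transpose(1, 2).reshape(n, c, h, w)
#   >>> torch.equal(manual, nn.ChannelShuffle(g)(x))
#   True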
| pytorch-master | torch/nn/modules/channelshuffle.py |
from torch import Tensor
from .batchnorm import _LazyNormBase, _NormBase
from .. import functional as F
__all__ = ['InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d', 'LazyInstanceNorm1d',
'LazyInstanceNorm2d', 'LazyInstanceNorm3d']
class _InstanceNorm(_NormBase):
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = False,
track_running_stats: bool = False,
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(_InstanceNorm, self).__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)
def _check_input_dim(self, input):
raise NotImplementedError
def _get_no_batch_dim(self):
raise NotImplementedError
def _handle_no_batch_input(self, input):
return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0)
def _apply_instance_norm(self, input):
return F.instance_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
# at version 1: removed running_mean and running_var when
# track_running_stats=False (default)
if version is None and not self.track_running_stats:
running_stats_keys = []
for name in ('running_mean', 'running_var'):
key = prefix + name
if key in state_dict:
running_stats_keys.append(key)
if len(running_stats_keys) > 0:
error_msgs.append(
'Unexpected running stats buffer(s) {names} for {klass} '
'with track_running_stats=False. If state_dict is a '
'checkpoint saved before 0.4.0, this may be expected '
'because {klass} does not track running stats by default '
'since 0.4.0. Please remove these keys from state_dict. If '
'the running stats are actually needed, instead set '
'track_running_stats=True in {klass} to enable them. See '
'the documentation of {klass} for details.'
.format(names=" and ".join('"{}"'.format(k) for k in running_stats_keys),
klass=self.__class__.__name__))
for key in running_stats_keys:
state_dict.pop(key)
super(_InstanceNorm, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, input: Tensor) -> Tensor:
self._check_input_dim(input)
if input.dim() == self._get_no_batch_dim():
return self._handle_no_batch_input(input)
return self._apply_instance_norm(input)
class InstanceNorm1d(_InstanceNorm):
r"""Applies Instance Normalization over a 2D (unbatched) or 3D (batched) input
as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``.
The standard-deviation is calculated via the biased estimator, equivalent to
`torch.var(input, unbiased=False)`.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
.. note::
:class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but
have some subtle differences. :class:`InstanceNorm1d` is applied
on each channel of channeled data like multidimensional time series, but
:class:`LayerNorm` is usually applied on an entire sample and often in NLP
tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
transform, while :class:`InstanceNorm1d` usually does not apply an affine
transform.
Args:
num_features: number of features or channels :math:`C` of the input
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, L)` or :math:`(C, L)`
- Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm1d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm1d(100, affine=True)
>>> input = torch.randn(20, 100, 40)
>>> output = m(input)
"""
def _get_no_batch_dim(self):
return 2
def _check_input_dim(self, input):
if input.dim() not in (2, 3):
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
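# A minimal reference sketch (hypothetical helper, not part of the public API):
# with ``affine=False`` and ``track_running_stats=False`` (the defaults above),
# InstanceNorm1d normalizes every (sample, channel) slice independently over the
# length dimension, using the biased variance.
def _instance_norm1d_reference(x: Tensor, eps: float = 1e-5) -> Tensor:
    # x: (N, C, L); statistics are computed per sample and per channel
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return (x - mean) / (var + eps).sqrt()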
class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm):
r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of
the ``num_features`` argument of the :class:`InstanceNorm1d` that is inferred
from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, L)` or :math:`(C, L)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, L)` or :math:`(C, L)`
- Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)
"""
cls_to_become = InstanceNorm1d # type: ignore[assignment]
def _get_no_batch_dim(self):
return 2
def _check_input_dim(self, input):
if input.dim() not in (2, 3):
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
class InstanceNorm2d(_InstanceNorm):
r"""Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
The standard-deviation is calculated via the biased estimator, equivalent to
`torch.var(input, unbiased=False)`.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
.. note::
:class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
have some subtle differences. :class:`InstanceNorm2d` is applied
on each channel of channeled data like RGB images, but
:class:`LayerNorm` is usually applied on an entire sample and often in NLP
tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
transform, while :class:`InstanceNorm2d` usually does not apply an affine
transform.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)` or :math:`(C, H, W)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
- Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm2d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm2d(100, affine=True)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
"""
def _get_no_batch_dim(self):
return 3
def _check_input_dim(self, input):
if input.dim() not in (3, 4):
raise ValueError('expected 3D or 4D input (got {}D input)'
.format(input.dim()))
class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):
r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of
the ``num_features`` argument of the :class:`InstanceNorm2d` that is inferred
from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)` or :math:`(C, H, W)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
- Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
"""
cls_to_become = InstanceNorm2d # type: ignore[assignment]
def _get_no_batch_dim(self):
return 3
def _check_input_dim(self, input):
if input.dim() not in (3, 4):
raise ValueError('expected 3D or 4D input (got {}D input)'
.format(input.dim()))
class InstanceNorm3d(_InstanceNorm):
r"""Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs
with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size C (where C is the input size) if :attr:`affine` is ``True``.
The standard-deviation is calculated via the biased estimator, equivalent to
`torch.var(input, unbiased=False)`.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
.. note::
:class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but
have some subtle differences. :class:`InstanceNorm3d` is applied
on each channel of channeled data like 3D models with RGB color, but
:class:`LayerNorm` is usually applied on an entire sample and often in NLP
tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
transform, while :class:`InstanceNorm3d` usually does not apply an affine
transform.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm3d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm3d(100, affine=True)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
"""
def _get_no_batch_dim(self):
return 4
def _check_input_dim(self, input):
if input.dim() not in (4, 5):
raise ValueError('expected 4D or 5D input (got {}D input)'
.format(input.dim()))
class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm):
r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of
the ``num_features`` argument of the :class:`InstanceNorm3d` that is inferred
from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
"""
cls_to_become = InstanceNorm3d # type: ignore[assignment]
def _get_no_batch_dim(self):
return 4
def _check_input_dim(self, input):
if input.dim() not in (4, 5):
raise ValueError('expected 4D or 5D input (got {}D input)'
.format(input.dim()))
| pytorch-master | torch/nn/modules/instancenorm.py |
from .module import Module
from typing import Tuple, Union
from torch import Tensor
from torch.types import _size
__all__ = ['Flatten', 'Unflatten']
class Flatten(Module):
r"""
Flattens a contiguous range of dims into a tensor. For use with :class:`~nn.Sequential`.
Shape:
- Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,
where :math:`S_{i}` is the size at dimension :math:`i` and :math:`*` means any
number of dimensions including none.
- Output: :math:`(*, \prod_{i=\text{start}}^{\text{end}} S_{i}, *)`.
Args:
start_dim: first dim to flatten (default = 1).
end_dim: last dim to flatten (default = -1).
Examples::
>>> input = torch.randn(32, 1, 5, 5)
>>> # With default parameters
>>> m = nn.Flatten()
>>> output = m(input)
>>> output.size()
torch.Size([32, 25])
>>> # With non-default parameters
>>> m = nn.Flatten(0, 2)
>>> output = m(input)
>>> output.size()
torch.Size([160, 5])
"""
__constants__ = ['start_dim', 'end_dim']
start_dim: int
end_dim: int
def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
super(Flatten, self).__init__()
self.start_dim = start_dim
self.end_dim = end_dim
def forward(self, input: Tensor) -> Tensor:
return input.flatten(self.start_dim, self.end_dim)
def extra_repr(self) -> str:
return 'start_dim={}, end_dim={}'.format(
self.start_dim, self.end_dim
)
class Unflatten(Module):
r"""
Unflattens a tensor dim, expanding it to a desired shape. For use with :class:`~nn.Sequential`.
* :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can
be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively.
* :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor and it can be
a `tuple` of ints or a `list` of ints or `torch.Size` for `Tensor` input; a `NamedShape`
(tuple of `(name, size)` tuples) for `NamedTensor` input.
Shape:
- Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at
dimension :attr:`dim` and :math:`*` means any number of dimensions including none.
- Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and
:math:`\prod_{i=1}^n U_i = S_{\text{dim}}`.
Args:
dim (Union[int, str]): Dimension to be unflattened
unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension
Examples::
>>> input = torch.randn(2, 50)
>>> # With tuple of ints
>>> m = nn.Sequential(
>>> nn.Linear(50, 50),
>>> nn.Unflatten(1, (2, 5, 5))
>>> )
>>> output = m(input)
>>> output.size()
torch.Size([2, 2, 5, 5])
>>> # With torch.Size
>>> m = nn.Sequential(
>>> nn.Linear(50, 50),
>>> nn.Unflatten(1, torch.Size([2, 5, 5]))
>>> )
>>> output = m(input)
>>> output.size()
torch.Size([2, 2, 5, 5])
>>> # With namedshape (tuple of tuples)
>>> input = torch.randn(2, 50, names=('N', 'features'))
>>> unflatten = nn.Unflatten('features', (('C', 2), ('H', 5), ('W', 5)))
>>> output = unflatten(input)
>>> output.size()
torch.Size([2, 2, 5, 5])
"""
NamedShape = Tuple[Tuple[str, int]]
__constants__ = ['dim', 'unflattened_size']
dim: Union[int, str]
unflattened_size: Union[_size, NamedShape]
def __init__(self, dim: Union[int, str], unflattened_size: Union[_size, NamedShape]) -> None:
super(Unflatten, self).__init__()
if isinstance(dim, int):
self._require_tuple_int(unflattened_size)
elif isinstance(dim, str):
self._require_tuple_tuple(unflattened_size)
else:
raise TypeError("invalid argument type for dim parameter")
self.dim = dim
self.unflattened_size = unflattened_size
def _require_tuple_tuple(self, input):
if (isinstance(input, tuple)):
for idx, elem in enumerate(input):
if not isinstance(elem, tuple):
raise TypeError("unflattened_size must be tuple of tuples, " +
"but found element of type {} at pos {}".format(type(elem).__name__, idx))
return
raise TypeError("unflattened_size must be a tuple of tuples, " +
"but found type {}".format(type(input).__name__))
def _require_tuple_int(self, input):
if (isinstance(input, (tuple, list))):
for idx, elem in enumerate(input):
if not isinstance(elem, int):
raise TypeError("unflattened_size must be tuple of ints, " +
"but found element of type {} at pos {}".format(type(elem).__name__, idx))
return
raise TypeError("unflattened_size must be a tuple of ints, but found type {}".format(type(input).__name__))
def forward(self, input: Tensor) -> Tensor:
return input.unflatten(self.dim, self.unflattened_size)
def extra_repr(self) -> str:
return 'dim={}, unflattened_size={}'.format(self.dim, self.unflattened_size)
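# A minimal usage sketch (values below are illustrative, not part of the module):
# Unflatten inverts Flatten over the same dimension when the unflattened sizes
# multiply back to the flattened size, e.g.
#
#   x = torch.randn(2, 50)
#   y = Unflatten(1, (2, 5, 5))(x)        # shape (2, 2, 5, 5), since 2*5*5 == 50
#   assert torch.equal(Flatten(1, -1)(y), x)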
| pytorch-master | torch/nn/modules/flatten.py |
from typing import Optional, Any
import torch
from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter, UninitializedBuffer
from .. import functional as F
from .. import init
from ._functions import SyncBatchNorm as sync_batch_norm
from .lazy import LazyModuleMixin
from .module import Module
__all__ = ['BatchNorm1d', 'LazyBatchNorm1d', 'BatchNorm2d', 'LazyBatchNorm2d', 'BatchNorm3d',
'LazyBatchNorm3d', 'SyncBatchNorm']
class _NormBase(Module):
"""Common base of _InstanceNorm and _BatchNorm"""
_version = 2
__constants__ = ["track_running_stats", "momentum", "eps", "num_features", "affine"]
num_features: int
eps: float
momentum: float
affine: bool
track_running_stats: bool
# WARNING: weight and bias purposely not defined here.
# See https://github.com/pytorch/pytorch/issues/39670
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = True,
track_running_stats: bool = True,
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(_NormBase, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
if self.affine:
self.weight = Parameter(torch.empty(num_features, **factory_kwargs))
self.bias = Parameter(torch.empty(num_features, **factory_kwargs))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
if self.track_running_stats:
self.register_buffer('running_mean', torch.zeros(num_features, **factory_kwargs))
self.register_buffer('running_var', torch.ones(num_features, **factory_kwargs))
self.running_mean: Optional[Tensor]
self.running_var: Optional[Tensor]
self.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long,
**{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
self.num_batches_tracked: Optional[Tensor]
else:
self.register_buffer("running_mean", None)
self.register_buffer("running_var", None)
self.register_buffer("num_batches_tracked", None)
self.reset_parameters()
def reset_running_stats(self) -> None:
if self.track_running_stats:
# running_mean/running_var/num_batches... are registered at runtime depending
# on whether self.track_running_stats is on
self.running_mean.zero_() # type: ignore[union-attr]
self.running_var.fill_(1) # type: ignore[union-attr]
self.num_batches_tracked.zero_() # type: ignore[union-attr,operator]
def reset_parameters(self) -> None:
self.reset_running_stats()
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def _check_input_dim(self, input):
raise NotImplementedError
def extra_repr(self):
return (
"{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
"track_running_stats={track_running_stats}".format(**self.__dict__)
)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if (version is None or version < 2) and self.track_running_stats:
# at version 2: added num_batches_tracked buffer
# this should have a default value of 0
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key not in state_dict:
state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)
super(_NormBase, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
class _BatchNorm(_NormBase):
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = True,
track_running_stats: bool = True,
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(_BatchNorm, self).__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
)
def forward(self, input: Tensor) -> Tensor:
self._check_input_dim(input)
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None: # type: ignore[has-type]
self.num_batches_tracked.add_(1) # type: ignore[has-type]
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
return F.batch_norm(
input,
# If buffers are not to be tracked, ensure that they won't be updated
self.running_mean
if not self.training or self.track_running_stats
else None,
self.running_var if not self.training or self.track_running_stats else None,
self.weight,
self.bias,
bn_training,
exponential_average_factor,
self.eps,
)
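# A minimal sketch of the running-statistics update that ``F.batch_norm`` applies
# when tracking is enabled (hypothetical helper, not part of this module). With a
# float ``momentum`` the factor is constant; with ``momentum=None`` the factor is
# 1 / num_batches_tracked, i.e. a cumulative moving average, matching the
# ``exponential_average_factor`` logic in ``_BatchNorm.forward`` above.
def _running_stat_update_sketch(running_stat, batch_stat, momentum, num_batches_tracked):
    factor = (1.0 / float(num_batches_tracked)) if momentum is None else momentum
    # \hat{x}_new = (1 - factor) * \hat{x} + factor * x_t
    return (1 - factor) * running_stat + factor * batch_stat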
class _LazyNormBase(LazyModuleMixin, _NormBase):
weight: UninitializedParameter # type: ignore[assignment]
bias: UninitializedParameter # type: ignore[assignment]
def __init__(self, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(_LazyNormBase, self).__init__(
# affine and track_running_stats are hardcoded to False to
# avoid creating tensors that will soon be overwritten.
0,
eps,
momentum,
False,
False,
**factory_kwargs,
)
self.affine = affine
self.track_running_stats = track_running_stats
if self.affine:
self.weight = UninitializedParameter(**factory_kwargs)
self.bias = UninitializedParameter(**factory_kwargs)
if self.track_running_stats:
self.running_mean = UninitializedBuffer(**factory_kwargs)
self.running_var = UninitializedBuffer(**factory_kwargs)
self.num_batches_tracked = torch.tensor(
0, dtype=torch.long, **{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
def reset_parameters(self) -> None:
if not self.has_uninitialized_params() and self.num_features != 0:
super().reset_parameters()
def initialize_parameters(self, input) -> None: # type: ignore[override]
if self.has_uninitialized_params():
self.num_features = input.shape[1]
if self.affine:
assert isinstance(self.weight, UninitializedParameter)
assert isinstance(self.bias, UninitializedParameter)
self.weight.materialize((self.num_features,))
self.bias.materialize((self.num_features,))
if self.track_running_stats:
self.running_mean.materialize((self.num_features,)) # type:ignore[union-attr]
self.running_var.materialize((self.num_features,)) # type:ignore[union-attr]
self.reset_parameters()
class BatchNorm1d(_BatchNorm):
r"""Applies Batch Normalization over a 2D or 3D input as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the number of features or channels of the input). By default, the
elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0. The
standard-deviation is calculated via the biased estimator, equivalent to `torch.var(input, unbiased=False)`.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization.
Args:
num_features: number of features or channels :math:`C` of the input
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`, where :math:`N` is the batch size,
:math:`C` is the number of features or channels, and :math:`L` is the sequence length
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm1d(100, affine=False)
>>> input = torch.randn(20, 100)
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError(
"expected 2D or 3D input (got {}D input)".format(input.dim())
)
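# A minimal reference sketch (hypothetical helper, not part of the module): in
# training mode with affine parameters, BatchNorm1d on a (N, C, L) input
# normalizes each channel with statistics computed over the (N, L) slice, using
# the biased variance, as described in the docstring above.
def _batch_norm1d_reference(x, weight, bias, eps=1e-5):
    # x: (N, C, L), weight/bias: (C,)
    mean = x.mean(dim=(0, 2), keepdim=True)
    var = x.var(dim=(0, 2), unbiased=False, keepdim=True)
    x_hat = (x - mean) / (var + eps).sqrt()
    return x_hat * weight.view(1, -1, 1) + bias.view(1, -1, 1)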
class LazyBatchNorm1d(_LazyNormBase, _BatchNorm):
r"""A :class:`torch.nn.BatchNorm1d` module with lazy initialization of
the ``num_features`` argument of the :class:`BatchNorm1d` that is inferred
from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch
statistics in both training and eval modes. Default: ``True``
"""
cls_to_become = BatchNorm1d # type: ignore[assignment]
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError(
"expected 2D or 3D input (got {}D input)".format(input.dim())
)
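# A minimal usage sketch (values are illustrative): the lazy module materializes
# its parameters on the first forward call and then becomes a regular
# BatchNorm1d via ``cls_to_become`` above, e.g.
#
#   bn = LazyBatchNorm1d()
#   out = bn(torch.randn(20, 100, 40))    # num_features inferred as 100
#   assert isinstance(bn, BatchNorm1d) and bn.num_features == 100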
class BatchNorm2d(_BatchNorm):
r"""Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
to 1 and the elements of :math:`\beta` are set to 0. The standard-deviation is calculated
via the biased estimator, equivalent to `torch.var(input, unbiased=False)`.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm2d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError("expected 4D input (got {}D input)".format(input.dim()))
class LazyBatchNorm2d(_LazyNormBase, _BatchNorm):
r"""A :class:`torch.nn.BatchNorm2d` module with lazy initialization of
the ``num_features`` argument of the :class:`BatchNorm2d` that is inferred
from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch
statistics in both training and eval modes. Default: ``True``
"""
cls_to_become = BatchNorm2d # type: ignore[assignment]
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError("expected 4D input (got {}D input)".format(input.dim()))
class BatchNorm3d(_BatchNorm):
r"""Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
to 1 and the elements of :math:`\beta` are set to 0. The standard-deviation is calculated
via the biased estimator, equivalent to `torch.var(input, unbiased=False)`.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric Batch Normalization
or Spatio-temporal Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm3d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError("expected 5D input (got {}D input)".format(input.dim()))
class LazyBatchNorm3d(_LazyNormBase, _BatchNorm):
r"""A :class:`torch.nn.BatchNorm3d` module with lazy initialization of
the ``num_features`` argument of the :class:`BatchNorm3d` that is inferred
from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch
statistics in both training and eval modes. Default: ``True``
"""
cls_to_become = BatchNorm3d # type: ignore[assignment]
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError("expected 5D input (got {}D input)".format(input.dim()))
class SyncBatchNorm(_BatchNorm):
r"""Applies Batch Normalization over a N-Dimensional input (a mini-batch of [N-2]D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over all
mini-batches of the same process groups. :math:`\gamma` and :math:`\beta`
are learnable parameter vectors of size `C` (where `C` is the input size).
By default, the elements of :math:`\gamma` are set to 1 and the elements of
:math:`\beta` are set to 0.
The standard-deviation is calculated via the biased estimator, equivalent to
`torch.var(input, unbiased=False)`.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done for each channel in the ``C`` dimension, computing
statistics on ``(N, +)`` slices, it's common terminology to call this Volumetric Batch
Normalization or Spatio-temporal Batch Normalization.
Currently :class:`SyncBatchNorm` only supports
:class:`~torch.nn.DistributedDataParallel` (DDP) with single GPU per process. Use
:meth:`torch.nn.SyncBatchNorm.convert_sync_batchnorm()` to convert
:attr:`BatchNorm*D` layer to :class:`SyncBatchNorm` before wrapping
Network with DDP.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, +)`
eps: a value added to the denominator for numerical stability.
Default: ``1e-5``
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch
statistics in both training and eval modes. Default: ``True``
process_group: synchronization of stats happen within each process group
individually. Default behavior is synchronization across the whole
world
Shape:
- Input: :math:`(N, C, +)`
- Output: :math:`(N, C, +)` (same shape as input)
.. note::
Synchronization of batchnorm statistics occurs only while training, i.e.
synchronization is disabled when ``model.eval()`` is set or if
``self.training`` is otherwise ``False``.
Examples::
>>> # With Learnable Parameters
>>> m = nn.SyncBatchNorm(100)
>>> # creating process group (optional)
>>> # ranks is a list of int identifying rank ids.
>>> ranks = list(range(8))
>>> r1, r2 = ranks[:4], ranks[4:]
>>> # Note: every rank calls into new_group for every
>>> # process group created, even if that rank is not
>>> # part of the group.
>>> # xdoctest: +SKIP
>>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
>>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
>>> # Without Learnable Parameters
>>> m = nn.SyncBatchNorm(100, affine=False, process_group=process_group)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
>>> # network is nn.BatchNorm layer
>>> sync_bn_network = nn.SyncBatchNorm.convert_sync_batchnorm(network, process_group)
>>> # only single gpu per process is currently supported
>>> ddp_sync_bn_network = torch.nn.parallel.DistributedDataParallel(
>>> sync_bn_network,
>>> device_ids=[args.local_rank],
>>> output_device=args.local_rank)
"""
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = True,
track_running_stats: bool = True,
process_group: Optional[Any] = None,
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(SyncBatchNorm, self).__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
)
self.process_group = process_group
def _check_input_dim(self, input):
if input.dim() < 2:
raise ValueError(
"expected at least 2D input (got {}D input)".format(input.dim())
)
def _check_non_zero_input_channels(self, input):
if input.size(1) == 0:
raise ValueError(
"SyncBatchNorm number of input channels should be non-zero"
)
def forward(self, input: Tensor) -> Tensor:
# currently only GPU input is supported
if not input.is_cuda:
raise ValueError("SyncBatchNorm expected input tensor to be on GPU")
self._check_input_dim(input)
self._check_non_zero_input_channels(input)
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
assert self.num_batches_tracked is not None
self.num_batches_tracked.add_(1)
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / self.num_batches_tracked.item()
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
# If buffers are not to be tracked, ensure that they won't be updated
running_mean = (
self.running_mean if not self.training or self.track_running_stats else None
)
running_var = (
self.running_var if not self.training or self.track_running_stats else None
)
# Don't sync batchnorm stats in inference mode (model.eval()).
need_sync = (bn_training and self.training)
if need_sync:
process_group = torch.distributed.group.WORLD
if self.process_group:
process_group = self.process_group
world_size = torch.distributed.get_world_size(process_group)
need_sync = world_size > 1
# fallback to framework BN when synchronization is not necessary
if not need_sync:
return F.batch_norm(
input,
running_mean,
running_var,
self.weight,
self.bias,
bn_training,
exponential_average_factor,
self.eps,
)
else:
assert bn_training
return sync_batch_norm.apply(
input,
self.weight,
self.bias,
running_mean,
running_var,
self.eps,
exponential_average_factor,
process_group,
world_size,
)
@classmethod
def convert_sync_batchnorm(cls, module, process_group=None):
r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
:class:`torch.nn.SyncBatchNorm` layers.
Args:
module (nn.Module): module containing one or more :attr:`BatchNorm*D` layers
process_group (optional): process group to scope synchronization,
default is the whole world
Returns:
The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
instead.
Example::
>>> # Network with nn.BatchNorm layer
>>> module = torch.nn.Sequential(
>>> torch.nn.Linear(20, 100),
>>> torch.nn.BatchNorm1d(100),
>>> ).cuda()
>>> # creating process group (optional)
>>> # ranks is a list of int identifying rank ids.
>>> ranks = list(range(8))
>>> r1, r2 = ranks[:4], ranks[4:]
>>> # Note: every rank calls into new_group for every
>>> # process group created, even if that rank is not
>>> # part of the group.
>>> # xdoctest: +SKIP
>>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
>>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
>>> sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
"""
module_output = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module_output = torch.nn.SyncBatchNorm(
module.num_features,
module.eps,
module.momentum,
module.affine,
module.track_running_stats,
process_group,
)
if module.affine:
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
if hasattr(module, "qconfig"):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(
name, cls.convert_sync_batchnorm(child, process_group)
)
del module
return module_output
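# A minimal usage sketch (illustrative only): the conversion above is recursive,
# so BatchNorm layers nested anywhere in a module tree are replaced, e.g.
#
#   model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
#   model = SyncBatchNorm.convert_sync_batchnorm(model)
#   assert isinstance(model[1], SyncBatchNorm)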
| pytorch-master | torch/nn/modules/batchnorm.py |
import math
from typing import Any
import torch
from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter
from .. import functional as F
from .. import init
from .module import Module
from .lazy import LazyModuleMixin
__all__ = [
'Bilinear',
'Identity',
'LazyLinear',
'Linear',
]
class Identity(Module):
r"""A placeholder identity operator that is argument-insensitive.
Args:
args: any argument (unused)
kwargs: any keyword argument (unused)
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
Examples::
>>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 20])
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(Identity, self).__init__()
def forward(self, input: Tensor) -> Tensor:
return input
class Linear(Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(*, H_{in})` where :math:`*` means any number of
dimensions including none and :math:`H_{in} = \text{in\_features}`.
- Output: :math:`(*, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in\_features})`. The values are
initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{1}{\text{in\_features}}`
Examples::
>>> m = nn.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
# Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
# uniform(-1/sqrt(in_features), 1/sqrt(in_features)). For details, see
# https://github.com/pytorch/pytorch/issues/57109
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
return F.linear(input, self.weight, self.bias)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
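# A minimal reference sketch (hypothetical helper, not part of the module):
# ``F.linear`` above computes y = x A^T + b for any input whose last dimension
# equals ``in_features``.
def _linear_reference(input, weight, bias=None):
    output = input.matmul(weight.t())
    return output if bias is None else output + bias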
# This class exists solely to avoid triggering an obscure error when scripting
# an improperly quantized attention layer. See this issue for details:
# https://github.com/pytorch/pytorch/issues/58969
# TODO: fail fast on quantization API usage error, then remove this class
# and replace uses of it with plain Linear
class NonDynamicallyQuantizableLinear(Linear):
def __init__(self, in_features: int, out_features: int, bias: bool = True,
device=None, dtype=None) -> None:
super().__init__(in_features, out_features, bias=bias,
device=device, dtype=dtype)
class Bilinear(Module):
r"""Applies a bilinear transformation to the incoming data:
:math:`y = x_1^T A x_2 + b`
Args:
in1_features: size of each first input sample
in2_features: size of each second input sample
out_features: size of each output sample
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input1: :math:`(*, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` and
:math:`*` means any number of additional dimensions including none. All but the last dimension
of the inputs should be the same.
- Input2: :math:`(*, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`.
- Output: :math:`(*, H_{out})` where :math:`H_{out}=\text{out\_features}`
and all but the last dimension are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`.
The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in1\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in1\_features}}`
Examples::
>>> m = nn.Bilinear(20, 30, 40)
>>> input1 = torch.randn(128, 20)
>>> input2 = torch.randn(128, 30)
>>> output = m(input1, input2)
>>> print(output.size())
torch.Size([128, 40])
"""
__constants__ = ['in1_features', 'in2_features', 'out_features']
in1_features: int
in2_features: int
out_features: int
weight: Tensor
def __init__(self, in1_features: int, in2_features: int, out_features: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(Bilinear, self).__init__()
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in1_features, in2_features), **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
bound = 1 / math.sqrt(self.weight.size(1))
init.uniform_(self.weight, -bound, bound)
if self.bias is not None:
init.uniform_(self.bias, -bound, bound)
def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
return F.bilinear(input1, input2, self.weight, self.bias)
def extra_repr(self) -> str:
return 'in1_features={}, in2_features={}, out_features={}, bias={}'.format(
self.in1_features, self.in2_features, self.out_features, self.bias is not None
)
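# A minimal reference sketch (hypothetical helper, not part of the module): for
# 2D inputs the bilinear form y = x_1^T A x_2 + b can be written with einsum,
# where the weight has shape (out_features, in1_features, in2_features).
def _bilinear_reference(input1, input2, weight, bias=None):
    # input1: (N, H1), input2: (N, H2) -> output: (N, out_features)
    output = torch.einsum('nh,ohk,nk->no', input1, weight, input2)
    return output if bias is None else output + bias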
class LazyLinear(LazyModuleMixin, Linear):
r"""A :class:`torch.nn.Linear` module where `in_features` is inferred.
In this module, the `weight` and `bias` are of :class:`torch.nn.UninitializedParameter`
class. They will be initialized after the first call to ``forward`` is done and the
module will become a regular :class:`torch.nn.Linear` module. The ``in_features`` argument
of the :class:`Linear` is inferred from the ``input.shape[-1]``.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in\_features})`. The values are
initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{1}{\text{in\_features}}`
"""
cls_to_become = Linear # type: ignore[assignment]
weight: UninitializedParameter
bias: UninitializedParameter # type: ignore[assignment]
def __init__(self, out_features: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
super().__init__(0, 0, False)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_features = out_features
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def reset_parameters(self) -> None:
if not self.has_uninitialized_params() and self.in_features != 0:
super().reset_parameters()
def initialize_parameters(self, input) -> None: # type: ignore[override]
if self.has_uninitialized_params():
with torch.no_grad():
self.in_features = input.shape[-1]
self.weight.materialize((self.out_features, self.in_features))
if self.bias is not None:
self.bias.materialize((self.out_features,))
self.reset_parameters()
# TODO: PartialLinear - maybe in sparse?
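# Illustrative usage sketch for LazyLinear (shapes are arbitrary example values):
#
#     >>> m = LazyLinear(out_features=10)   # in_features is not known yet
#     >>> x = torch.randn(4, 32)
#     >>> y = m(x)                          # first forward materializes weight as (10, 32)
#     >>> m.in_features
#     32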
| pytorch-master | torch/nn/modules/linear.py |
import torch
import torch.distributed as dist
from torch.autograd.function import Function
class SyncBatchNorm(Function):
@staticmethod
def forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size):
if not input.is_contiguous(memory_format=torch.channels_last):
input = input.contiguous()
if weight is not None:
weight = weight.contiguous()
size = int(input.numel() // input.size(1))
if size == 1 and world_size < 2:
raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size))
num_channels = input.shape[1]
if input.numel() > 0:
# calculate mean/invstd for input.
mean, invstd = torch.batch_norm_stats(input, eps)
count = torch.full(
(1,),
input.numel() // input.size(1),
dtype=mean.dtype,
device=mean.device
)
# C, C, 1 -> (2C + 1)
combined = torch.cat([mean, invstd, count], dim=0)
else:
# for empty input, set stats and the count to zero. The stats with
# zero count will be filtered out later when computing global mean
            # & invstd, but they still need to participate in the all_gather
# collective communication to unblock other peer processes.
combined = torch.zeros(
2 * num_channels + 1,
dtype=input.dtype,
device=input.device
)
# Use allgather instead of allreduce because count could be different across
# ranks, simple all reduce op can not give correct results.
# batch_norm_gather_stats_with_counts calculates global mean & invstd based on
# all gathered mean, invstd and count.
# for nccl backend, use the optimized version of all gather.
if process_group._get_backend_name() == 'nccl':
# world_size * (2C + 1)
combined_size = combined.numel()
combined_flat = torch.empty(1,
combined_size * world_size,
dtype=combined.dtype,
device=combined.device)
dist._all_gather_base(combined_flat, combined, process_group, async_op=False)
combined = torch.reshape(combined_flat, (world_size, combined_size))
# world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
else:
# world_size * (2C + 1)
combined_list = [
torch.empty_like(combined) for _ in range(world_size)
]
dist.all_gather(combined_list, combined, process_group, async_op=False)
combined = torch.stack(combined_list, dim=0)
# world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
if not torch.cuda.is_current_stream_capturing():
# The lines below force a synchronization between CUDA and CPU, because
# the shape of the result count_all depends on the values in mask tensor.
# Such synchronizations break CUDA Graph capturing.
# See https://github.com/pytorch/pytorch/issues/78549
# FIXME: https://github.com/pytorch/pytorch/issues/78656 describes
# a better longer-term solution.
# remove stats from empty inputs
mask = count_all.squeeze(-1) >= 1
count_all = count_all[mask]
mean_all = mean_all[mask]
invstd_all = invstd_all[mask]
# calculate global mean & invstd
mean, invstd = torch.batch_norm_gather_stats_with_counts(
input,
mean_all,
invstd_all,
running_mean,
running_var,
momentum,
eps,
count_all.view(-1)
)
self.save_for_backward(input, weight, mean, invstd, count_all.to(torch.int32))
self.process_group = process_group
# apply element-wise normalization
if input.numel() > 0:
return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
else:
return torch.empty_like(input)
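    # Note on the reduction above: torch.batch_norm_gather_stats_with_counts
    # conceptually combines the gathered per-rank statistics with count weighting,
    #   global_mean = sum_r(count_r * mean_r) / sum_r(count_r),
    # derives the global invstd from the per-rank means/invstds plus the spread
    # between rank means, and updates running_mean / running_var in place.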
@staticmethod
def backward(self, grad_output):
if not grad_output.is_contiguous(memory_format=torch.channels_last):
grad_output = grad_output.contiguous()
saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
grad_input = grad_weight = grad_bias = None
process_group = self.process_group
if saved_input.numel() > 0:
# calculate local stats as well as grad_weight / grad_bias
sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
grad_output,
saved_input,
mean,
invstd,
weight,
self.needs_input_grad[0],
self.needs_input_grad[1],
self.needs_input_grad[2]
)
if self.needs_input_grad[0]:
# synchronizing stats used to calculate input gradient.
num_channels = sum_dy.shape[0]
combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
torch.distributed.all_reduce(
combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)
sum_dy, sum_dy_xmu = torch.split(combined, num_channels)
# backward pass for gradient calculation
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_tensor
)
# synchronizing of grad_weight / grad_bias is not needed as distributed
# training would handle all reduce.
if weight is None or not self.needs_input_grad[1]:
grad_weight = None
if weight is None or not self.needs_input_grad[2]:
grad_bias = None
else:
# This process got an empty input tensor in the forward pass.
# Although this process can directly set grad_input as an empty
# tensor of zeros, it still needs to participate in the collective
# communication to unblock its peers, as other peer processes might
            # have received non-empty inputs.
num_channels = saved_input.shape[1]
if self.needs_input_grad[0]:
# launch all_reduce to unblock other peer processes
combined = torch.zeros(
2 * num_channels,
dtype=saved_input.dtype,
device=saved_input.device
)
torch.distributed.all_reduce(
combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)
# Leave grad_input, grad_weight and grad_bias as None, which will be
# interpreted by the autograd engine as Tensors full of zeros.
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
class CrossMapLRN2d(Function):
@staticmethod
def forward(ctx, input, size, alpha=1e-4, beta=0.75, k=1):
ctx.size = size
ctx.alpha = alpha
ctx.beta = beta
ctx.k = k
ctx.scale = None
assert input.dim() == 4
ctx.scale = ctx.scale or input.new()
output = input.new()
batch_size = input.size(0)
channels = input.size(1)
input_height = input.size(2)
input_width = input.size(3)
output.resize_as_(input)
ctx.scale.resize_as_(input)
# use output storage as temporary buffer
input_square = output
torch.pow(input, 2, out=input_square)
pre_pad = int((ctx.size - 1) / 2 + 1)
pre_pad_crop = channels if pre_pad > channels else pre_pad
scale_first = ctx.scale.select(1, 0)
scale_first.zero_()
# compute first feature map normalization
for c in range(pre_pad_crop):
scale_first.add_(input_square.select(1, c))
# reuse computations for next feature maps normalization
# by adding the next feature map and removing the previous
for c in range(1, channels):
scale_previous = ctx.scale.select(1, c - 1)
scale_current = ctx.scale.select(1, c)
scale_current.copy_(scale_previous)
if c < channels - pre_pad + 1:
square_next = input_square.select(1, c + pre_pad - 1)
scale_current.add_(square_next, alpha=1)
if c > pre_pad:
square_previous = input_square.select(1, c - pre_pad)
scale_current.add_(square_previous, alpha=-1)
ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k)
torch.pow(ctx.scale, -ctx.beta, out=output)
output.mul_(input)
ctx.save_for_backward(input, output)
return output
@staticmethod
def backward(ctx, grad_output):
input, output = ctx.saved_tensors
grad_input = grad_output.new()
batch_size = input.size(0)
channels = input.size(1)
input_height = input.size(2)
input_width = input.size(3)
        padded_ratio = input.new(channels + ctx.size - 1, input_height,
                                 input_width)
        accum_ratio = input.new(input_height, input_width)
        cache_ratio_value = 2 * ctx.alpha * ctx.beta / ctx.size
        inversePrePad = int(ctx.size - (ctx.size - 1) / 2)
        grad_input.resize_as_(input)
        # first term of the gradient: scale^{-beta} * grad_output
        torch.pow(ctx.scale, -ctx.beta, out=grad_input).mul_(grad_output)
        padded_ratio.zero_()
        padded_ratio_center = padded_ratio.narrow(0, inversePrePad,
                                                  channels)
        # second, cross-channel term: subtract (2*alpha*beta/size) * input times a
        # sliding-window sum (over `size` channels) of grad_output * output / scale
        for n in range(batch_size):
            torch.mul(grad_output[n], output[n], out=padded_ratio_center)
            padded_ratio_center.div_(ctx.scale[n])
            torch.sum(
                padded_ratio.narrow(0, 0, ctx.size - 1), 0, keepdim=False, out=accum_ratio)
            for c in range(channels):
                accum_ratio.add_(padded_ratio[c + ctx.size - 1])
                grad_input[n][c].addcmul_(input[n][c], accum_ratio, value=-cache_ratio_value)
                accum_ratio.add_(padded_ratio[c], alpha=-1)
return grad_input, None, None, None, None
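# Illustrative usage sketch of calling this autograd Function directly
# (minimal example; tensor values are arbitrary):
#
#     >>> x = torch.randn(2, 8, 5, 5, requires_grad=True)
#     >>> y = CrossMapLRN2d.apply(x, 3)   # size=3; alpha, beta, k use the defaults above
#     >>> y.sum().backward()              # exercises the hand-written backward above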
class BackwardHookFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
ctx.mark_non_differentiable(*[arg for arg in args if not arg.requires_grad])
return args
@staticmethod
def backward(ctx, *args):
return args
| pytorch-master | torch/nn/modules/_functions.py |
from typing import List, Optional
from torch import Tensor
from .module import Module
from .utils import _single, _pair, _triple
from .. import functional as F
from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
_ratio_3_t, _ratio_2_t, _size_any_opt_t, _size_2_opt_t, _size_3_opt_t)
__all__ = ['MaxPool1d', 'MaxPool2d', 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d',
'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'FractionalMaxPool2d', 'FractionalMaxPool3d', 'LPPool1d',
'LPPool2d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d']
class _MaxPoolNd(Module):
__constants__ = ['kernel_size', 'stride', 'padding', 'dilation',
'return_indices', 'ceil_mode']
return_indices: bool
ceil_mode: bool
def __init__(self, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
padding: _size_any_t = 0, dilation: _size_any_t = 1,
return_indices: bool = False, ceil_mode: bool = False) -> None:
super(_MaxPoolNd, self).__init__()
self.kernel_size = kernel_size
self.stride = stride if (stride is not None) else kernel_size
self.padding = padding
self.dilation = dilation
self.return_indices = return_indices
self.ceil_mode = ceil_mode
def extra_repr(self) -> str:
return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
', dilation={dilation}, ceil_mode={ceil_mode}'.format(**self.__dict__)
class MaxPool1d(_MaxPoolNd):
r"""Applies a 1D max pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, L)`
and output :math:`(N, C, L_{out})` can be precisely described as:
.. math::
out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1}
input(N_i, C_j, stride \times k + m)
If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
for :attr:`padding` number of points. :attr:`dilation` is the stride between the elements within the
sliding window. This `link`_ has a nice visualization of the pooling parameters.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
Args:
kernel_size: The size of the sliding window, must be > 0.
stride: The stride of the sliding window, must be > 0. Default value is :attr:`kernel_size`.
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.MaxUnpool1d` later
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
Shape:
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
.. math::
L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
\times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
Examples::
>>> # pool of size=3, stride=2
>>> m = nn.MaxPool1d(3, stride=2)
>>> input = torch.randn(20, 16, 50)
>>> output = m(input)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
kernel_size: _size_1_t
stride: _size_1_t
padding: _size_1_t
dilation: _size_1_t
def forward(self, input: Tensor):
return F.max_pool1d(input, self.kernel_size, self.stride,
self.padding, self.dilation, ceil_mode=self.ceil_mode,
return_indices=self.return_indices)
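# Worked instance of the L_out formula above (same numbers as the docstring
# example): L_in=50, kernel_size=3, stride=2, padding=0, dilation=1 gives
#   L_out = floor((50 + 0 - 1*(3 - 1) - 1) / 2 + 1) = floor(24.5) = 24,
# so m(torch.randn(20, 16, 50)) has shape (20, 16, 24).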
class MaxPool2d(_MaxPoolNd):
r"""Applies a 2D max pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
can be precisely described as:
.. math::
\begin{aligned}
out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
& \text{input}(N_i, C_j, \text{stride[0]} \times h + m,
\text{stride[1]} \times w + n)
\end{aligned}
If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
dilation: a parameter that controls the stride of elements in the window
return_indices: if ``True``, will return the max indices along with the outputs.
Useful for :class:`torch.nn.MaxUnpool2d` later
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
\times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
\times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
Examples::
>>> # pool of square window of size=3, stride=2
>>> m = nn.MaxPool2d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
kernel_size: _size_2_t
stride: _size_2_t
padding: _size_2_t
dilation: _size_2_t
def forward(self, input: Tensor):
return F.max_pool2d(input, self.kernel_size, self.stride,
self.padding, self.dilation, ceil_mode=self.ceil_mode,
return_indices=self.return_indices)
class MaxPool3d(_MaxPoolNd):
r"""Applies a 3D max pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
can be precisely described as:
.. math::
\begin{aligned}
\text{out}(N_i, C_j, d, h, w) ={} & \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
& \text{input}(N_i, C_j, \text{stride[0]} \times d + k,
\text{stride[1]} \times h + m, \text{stride[2]} \times w + n)
\end{aligned}
If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimension
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on all three sides
dilation: a parameter that controls the stride of elements in the window
return_indices: if ``True``, will return the max indices along with the outputs.
Useful for :class:`torch.nn.MaxUnpool3d` later
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times
(\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times
(\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times
(\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
Examples::
>>> # pool of square window of size=3, stride=2
>>> m = nn.MaxPool3d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> input = torch.randn(20, 16, 50, 44, 31)
>>> output = m(input)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
""" # noqa: E501
kernel_size: _size_3_t
stride: _size_3_t
padding: _size_3_t
dilation: _size_3_t
def forward(self, input: Tensor):
return F.max_pool3d(input, self.kernel_size, self.stride,
self.padding, self.dilation, ceil_mode=self.ceil_mode,
return_indices=self.return_indices)
class _MaxUnpoolNd(Module):
def extra_repr(self) -> str:
return 'kernel_size={}, stride={}, padding={}'.format(
self.kernel_size, self.stride, self.padding
)
class MaxUnpool1d(_MaxUnpoolNd):
r"""Computes a partial inverse of :class:`MaxPool1d`.
:class:`MaxPool1d` is not fully invertible, since the non-maximal values are lost.
:class:`MaxUnpool1d` takes in as input the output of :class:`MaxPool1d`
including the indices of the maximal values and computes a partial inverse
in which all non-maximal values are set to zero.
.. note:: :class:`MaxPool1d` can map several input sizes to the same output
sizes. Hence, the inversion process can get ambiguous.
To accommodate this, you can provide the needed output size
as an additional argument :attr:`output_size` in the forward call.
See the Inputs and Example below.
Args:
kernel_size (int or tuple): Size of the max pooling window.
stride (int or tuple): Stride of the max pooling window.
It is set to :attr:`kernel_size` by default.
padding (int or tuple): Padding that was added to the input
Inputs:
- `input`: the input Tensor to invert
- `indices`: the indices given out by :class:`~torch.nn.MaxPool1d`
- `output_size` (optional): the targeted output size
Shape:
- Input: :math:`(N, C, H_{in})` or :math:`(C, H_{in})`.
- Output: :math:`(N, C, H_{out})` or :math:`(C, H_{out})`, where
.. math::
H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]
or as given by :attr:`output_size` in the call operator
Example::
>>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?")
>>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
>>> unpool = nn.MaxUnpool1d(2, stride=2)
>>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]])
>>> output, indices = pool(input)
>>> unpool(output, indices)
tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
>>> # Example showcasing the use of output_size
>>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]])
>>> output, indices = pool(input)
>>> unpool(output, indices, output_size=input.size())
tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8., 0.]]])
>>> unpool(output, indices)
tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
"""
kernel_size: _size_1_t
stride: _size_1_t
padding: _size_1_t
def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0) -> None:
super(MaxUnpool1d, self).__init__()
self.kernel_size = _single(kernel_size)
self.stride = _single(stride if (stride is not None) else kernel_size)
self.padding = _single(padding)
def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
return F.max_unpool1d(input, indices, self.kernel_size, self.stride,
self.padding, output_size)
class MaxUnpool2d(_MaxUnpoolNd):
r"""Computes a partial inverse of :class:`MaxPool2d`.
:class:`MaxPool2d` is not fully invertible, since the non-maximal values are lost.
:class:`MaxUnpool2d` takes in as input the output of :class:`MaxPool2d`
including the indices of the maximal values and computes a partial inverse
in which all non-maximal values are set to zero.
.. note:: :class:`MaxPool2d` can map several input sizes to the same output
sizes. Hence, the inversion process can get ambiguous.
To accommodate this, you can provide the needed output size
as an additional argument :attr:`output_size` in the forward call.
See the Inputs and Example below.
Args:
kernel_size (int or tuple): Size of the max pooling window.
stride (int or tuple): Stride of the max pooling window.
It is set to :attr:`kernel_size` by default.
padding (int or tuple): Padding that was added to the input
Inputs:
- `input`: the input Tensor to invert
- `indices`: the indices given out by :class:`~torch.nn.MaxPool2d`
- `output_size` (optional): the targeted output size
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
.. math::
H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}
.. math::
W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}
or as given by :attr:`output_size` in the call operator
Example::
>>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
>>> unpool = nn.MaxUnpool2d(2, stride=2)
>>> input = torch.tensor([[[[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.],
[13., 14., 15., 16.]]]])
>>> output, indices = pool(input)
>>> unpool(output, indices)
tensor([[[[ 0., 0., 0., 0.],
[ 0., 6., 0., 8.],
[ 0., 0., 0., 0.],
[ 0., 14., 0., 16.]]]])
>>> # Now using output_size to resolve an ambiguous size for the inverse
        >>> input = torch.tensor([[[[ 1., 2., 3., 4., 5.],
                                    [ 6., 7., 8., 9., 10.],
                                    [11., 12., 13., 14., 15.],
                                    [16., 17., 18., 19., 20.]]]])
>>> output, indices = pool(input)
>>> # This call will not work without specifying output_size
>>> unpool(output, indices, output_size=input.size())
tensor([[[[ 0., 0., 0., 0., 0.],
[ 0., 7., 0., 9., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 17., 0., 19., 0.]]]])
"""
kernel_size: _size_2_t
stride: _size_2_t
padding: _size_2_t
def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0) -> None:
super(MaxUnpool2d, self).__init__()
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride if (stride is not None) else kernel_size)
self.padding = _pair(padding)
def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
return F.max_unpool2d(input, indices, self.kernel_size, self.stride,
self.padding, output_size)
class MaxUnpool3d(_MaxUnpoolNd):
r"""Computes a partial inverse of :class:`MaxPool3d`.
:class:`MaxPool3d` is not fully invertible, since the non-maximal values are lost.
:class:`MaxUnpool3d` takes in as input the output of :class:`MaxPool3d`
including the indices of the maximal values and computes a partial inverse
in which all non-maximal values are set to zero.
.. note:: :class:`MaxPool3d` can map several input sizes to the same output
sizes. Hence, the inversion process can get ambiguous.
To accommodate this, you can provide the needed output size
as an additional argument :attr:`output_size` in the forward call.
See the Inputs section below.
Args:
kernel_size (int or tuple): Size of the max pooling window.
stride (int or tuple): Stride of the max pooling window.
It is set to :attr:`kernel_size` by default.
padding (int or tuple): Padding that was added to the input
Inputs:
- `input`: the input Tensor to invert
- `indices`: the indices given out by :class:`~torch.nn.MaxPool3d`
- `output_size` (optional): the targeted output size
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = (D_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}
.. math::
H_{out} = (H_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}
.. math::
W_{out} = (W_{in} - 1) \times \text{stride[2]} - 2 \times \text{padding[2]} + \text{kernel\_size[2]}
or as given by :attr:`output_size` in the call operator
Example::
>>> # pool of square window of size=3, stride=2
>>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
>>> unpool = nn.MaxUnpool3d(3, stride=2)
>>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
>>> unpooled_output = unpool(output, indices)
>>> unpooled_output.size()
torch.Size([20, 16, 51, 33, 15])
"""
kernel_size: _size_3_t
stride: _size_3_t
padding: _size_3_t
def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0) -> None:
super(MaxUnpool3d, self).__init__()
self.kernel_size = _triple(kernel_size)
self.stride = _triple(stride if (stride is not None) else kernel_size)
self.padding = _triple(padding)
def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
return F.max_unpool3d(input, indices, self.kernel_size, self.stride,
self.padding, output_size)
class _AvgPoolNd(Module):
__constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad']
def extra_repr(self) -> str:
return 'kernel_size={}, stride={}, padding={}'.format(
self.kernel_size, self.stride, self.padding
)
class AvgPool1d(_AvgPoolNd):
r"""Applies a 1D average pooling over an input signal composed of several
input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, L)`,
output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k`
can be precisely described as:
.. math::
\text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1}
\text{input}(N_i, C_j, \text{stride} \times l + m)
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
for :attr:`padding` number of points.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be
an ``int`` or a one-element tuple.
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
count_include_pad: when True, will include the zero-padding in the averaging calculation
Shape:
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
.. math::
L_{out} = \left\lfloor \frac{L_{in} +
2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
Examples::
>>> # pool with window of size=3, stride=2
>>> m = nn.AvgPool1d(3, stride=2)
>>> m(torch.tensor([[[1.,2,3,4,5,6,7]]]))
tensor([[[ 2., 4., 6.]]])
"""
kernel_size: _size_1_t
stride: _size_1_t
padding: _size_1_t
ceil_mode: bool
count_include_pad: bool
def __init__(self, kernel_size: _size_1_t, stride: _size_1_t = None, padding: _size_1_t = 0, ceil_mode: bool = False,
count_include_pad: bool = True) -> None:
super(AvgPool1d, self).__init__()
self.kernel_size = _single(kernel_size)
self.stride = _single(stride if stride is not None else kernel_size)
self.padding = _single(padding)
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
def forward(self, input: Tensor) -> Tensor:
return F.avg_pool1d(
input, self.kernel_size, self.stride, self.padding, self.ceil_mode,
self.count_include_pad)
class AvgPool2d(_AvgPoolNd):
r"""Applies a 2D average pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
can be precisely described as:
.. math::
out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
for :attr:`padding` number of points.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
count_include_pad: when True, will include the zero-padding in the averaging calculation
divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
\text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
\text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
Examples::
>>> # pool of square window of size=3, stride=2
>>> m = nn.AvgPool2d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
"""
__constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
kernel_size: _size_2_t
stride: _size_2_t
padding: _size_2_t
ceil_mode: bool
count_include_pad: bool
def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0,
ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
super(AvgPool2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride if (stride is not None) else kernel_size
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
def forward(self, input: Tensor) -> Tensor:
return F.avg_pool2d(input, self.kernel_size, self.stride,
self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
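# Illustrative note on divisor_override above: for a 2x2 window holding
# [[1., 2.], [3., 4.]], the default divisor (the pooling region size, 4) yields
# 2.5, whereas divisor_override=1 would yield the raw window sum 10.0.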
class AvgPool3d(_AvgPoolNd):
r"""Applies a 3D average pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
can be precisely described as:
.. math::
\begin{aligned}
\text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\
& \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k,
\text{stride}[1] \times h + m, \text{stride}[2] \times w + n)}
{kD \times kH \times kW}
\end{aligned}
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on all three sides
for :attr:`padding` number of points.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
The parameters :attr:`kernel_size`, :attr:`stride` can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimension
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on all three sides
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
count_include_pad: when True, will include the zero-padding in the averaging calculation
divisor_override: if specified, it will be used as divisor, otherwise :attr:`kernel_size` will be used
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
:math:`(C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] -
\text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] -
\text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] -
\text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor
Examples::
>>> # pool of square window of size=3, stride=2
>>> m = nn.AvgPool3d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> input = torch.randn(20, 16, 50, 44, 31)
>>> output = m(input)
"""
__constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
kernel_size: _size_3_t
stride: _size_3_t
padding: _size_3_t
ceil_mode: bool
count_include_pad: bool
def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0,
ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
super(AvgPool3d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride if (stride is not None) else kernel_size
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
def forward(self, input: Tensor) -> Tensor:
return F.avg_pool3d(input, self.kernel_size, self.stride,
self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
def __setstate__(self, d):
super(AvgPool3d, self).__setstate__(d)
self.__dict__.setdefault('padding', 0)
self.__dict__.setdefault('ceil_mode', False)
self.__dict__.setdefault('count_include_pad', True)
class FractionalMaxPool2d(Module):
r"""Applies a 2D fractional max pooling over an input signal composed of several input planes.
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
step size determined by the target output size.
The number of output features is equal to the number of input planes.
Args:
kernel_size: the size of the window to take a max over.
Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)`
output_size: the target output size of the image of the form `oH x oW`.
Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
This has to be a number or tuple in the range (0, 1)
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False``
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
:math:`(H_{out}, W_{out})=\text{output\_size}` or
:math:`(H_{out}, W_{out})=\text{output\_ratio} \times (H_{in}, W_{in})`.
Examples:
>>> # pool of square window of size=3, and target output size 13x12
>>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
>>> # pool of square window and target output size being half of input image size
>>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
.. _Fractional MaxPooling:
https://arxiv.org/abs/1412.6071
"""
__constants__ = ['kernel_size', 'return_indices', 'output_size',
'output_ratio']
kernel_size: _size_2_t
return_indices: bool
output_size: _size_2_t
output_ratio: _ratio_2_t
def __init__(self, kernel_size: _size_2_t, output_size: Optional[_size_2_t] = None,
output_ratio: Optional[_ratio_2_t] = None,
return_indices: bool = False, _random_samples=None) -> None:
super(FractionalMaxPool2d, self).__init__()
self.kernel_size = _pair(kernel_size)
self.return_indices = return_indices
self.register_buffer('_random_samples', _random_samples)
self.output_size = _pair(output_size) if output_size is not None else None
self.output_ratio = _pair(output_ratio) if output_ratio is not None else None
if output_size is None and output_ratio is None:
raise ValueError("FractionalMaxPool2d requires specifying either "
"an output size, or a pooling ratio")
if output_size is not None and output_ratio is not None:
raise ValueError("only one of output_size and output_ratio may be specified")
if self.output_ratio is not None:
if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1):
raise ValueError("output_ratio must be between 0 and 1 (got {})"
.format(output_ratio))
def forward(self, input: Tensor):
return F.fractional_max_pool2d(
input, self.kernel_size, self.output_size, self.output_ratio,
self.return_indices,
_random_samples=self._random_samples)
class FractionalMaxPool3d(Module):
r"""Applies a 3D fractional max pooling over an input signal composed of several input planes.
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
step size determined by the target output size.
The number of output features is equal to the number of input planes.
Args:
kernel_size: the size of the window to take a max over.
            Can be a single number k (for a cubic kernel of k x k x k) or a tuple `(kt, kh, kw)`
output_size: the target output size of the image of the form `oT x oH x oW`.
            Can be a tuple `(oT, oH, oW)` or a single number oH for a cubic image `oH x oH x oH`
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
This has to be a number or tuple in the range (0, 1)
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to :meth:`nn.MaxUnpool3d`. Default: ``False``
Shape:
- Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where
:math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or
:math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})`
Examples:
>>> # pool of cubic window of size=3, and target output size 13x12x11
>>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11))
>>> # pool of cubic window and target output size being half of input size
>>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5))
>>> input = torch.randn(20, 16, 50, 32, 16)
>>> output = m(input)
.. _Fractional MaxPooling:
https://arxiv.org/abs/1412.6071
"""
__constants__ = ['kernel_size', 'return_indices', 'output_size',
'output_ratio']
kernel_size: _size_3_t
return_indices: bool
output_size: _size_3_t
output_ratio: _ratio_3_t
def __init__(self, kernel_size: _size_3_t, output_size: Optional[_size_3_t] = None,
output_ratio: Optional[_ratio_3_t] = None,
return_indices: bool = False, _random_samples=None) -> None:
super(FractionalMaxPool3d, self).__init__()
self.kernel_size = _triple(kernel_size)
self.return_indices = return_indices
self.register_buffer('_random_samples', _random_samples)
self.output_size = _triple(output_size) if output_size is not None else None
self.output_ratio = _triple(output_ratio) if output_ratio is not None else None
if output_size is None and output_ratio is None:
raise ValueError("FractionalMaxPool3d requires specifying either "
"an output size, or a pooling ratio")
if output_size is not None and output_ratio is not None:
raise ValueError("only one of output_size and output_ratio may be specified")
if self.output_ratio is not None:
if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1):
raise ValueError("output_ratio must be between 0 and 1 (got {})"
.format(output_ratio))
def forward(self, input: Tensor):
return F.fractional_max_pool3d(
input, self.kernel_size, self.output_size, self.output_ratio,
self.return_indices,
_random_samples=self._random_samples)
class _LPPoolNd(Module):
__constants__ = ['norm_type', 'kernel_size', 'stride', 'ceil_mode']
norm_type: float
ceil_mode: bool
def __init__(self, norm_type: float, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
ceil_mode: bool = False) -> None:
super(_LPPoolNd, self).__init__()
self.norm_type = norm_type
self.kernel_size = kernel_size
self.stride = stride
self.ceil_mode = ceil_mode
def extra_repr(self) -> str:
return 'norm_type={norm_type}, kernel_size={kernel_size}, stride={stride}, ' \
'ceil_mode={ceil_mode}'.format(**self.__dict__)
class LPPool1d(_LPPoolNd):
r"""Applies a 1D power-average pooling over an input signal composed of several input
planes.
On each window, the function computed is:
.. math::
f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
- At p = :math:`\infty`, one gets Max Pooling
- At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)
.. note:: If the sum to the power of `p` is zero, the gradient of this function is
not defined. This implementation will set the gradient to zero in this case.
Args:
kernel_size: a single int, the size of the window
stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
.. math::
L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
Examples::
>>> # power-2 pool of window of length 3, with stride 2.
>>> m = nn.LPPool1d(2, 3, stride=2)
>>> input = torch.randn(20, 16, 50)
>>> output = m(input)
"""
kernel_size: _size_1_t
stride: _size_1_t
def forward(self, input: Tensor) -> Tensor:
return F.lp_pool1d(input, float(self.norm_type), self.kernel_size,
self.stride, self.ceil_mode)
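# Worked instance of the window function above: for p=2 and a window holding
# the values [3., 4.], f(X) = sqrt(3**2 + 4**2) = 5.0; with p=1 the same window
# yields 3 + 4 = 7 (sum pooling).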
class LPPool2d(_LPPoolNd):
r"""Applies a 2D power-average pooling over an input signal composed of several input
planes.
On each window, the function computed is:
.. math::
f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
- At p = :math:`\infty`, one gets Max Pooling
- At p = 1, one gets Sum Pooling (which is proportional to average pooling)
The parameters :attr:`kernel_size`, :attr:`stride` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
.. note:: If the sum to the power of `p` is zero, the gradient of this function is
not defined. This implementation will set the gradient to zero in this case.
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
Examples::
>>> # power-2 pool of square window of size=3, stride=2
>>> m = nn.LPPool2d(2, 3, stride=2)
>>> # pool of non-square window of power 1.2
>>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
"""
kernel_size: _size_2_t
stride: _size_2_t
def forward(self, input: Tensor) -> Tensor:
return F.lp_pool2d(input, float(self.norm_type), self.kernel_size,
self.stride, self.ceil_mode)
class _AdaptiveMaxPoolNd(Module):
__constants__ = ['output_size', 'return_indices']
return_indices: bool
def __init__(self, output_size: _size_any_opt_t, return_indices: bool = False) -> None:
super(_AdaptiveMaxPoolNd, self).__init__()
self.output_size = output_size
self.return_indices = return_indices
def extra_repr(self) -> str:
return 'output_size={}'.format(self.output_size)
# FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and
# output shapes are, and how the operation computes output.
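# Sketch of the windowing used by the adaptive ops below (based on the usual
# adaptive-pooling convention; the authoritative logic lives in ATen): for output
# index i along a dimension with input length L_in and output length L_out, the
# pooled window is roughly
#   start = floor(i * L_in / L_out),  end = ceil((i + 1) * L_in / L_out),
# so windows may overlap or differ in size by one element.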
class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes.
The output size is :math:`L_{out}`, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size :math:`L_{out}`.
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to nn.MaxUnpool1d. Default: ``False``
Shape:
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
:math:`L_{out}=\text{output\_size}`.
Examples:
>>> # target output size of 5
>>> m = nn.AdaptiveMaxPool1d(5)
>>> input = torch.randn(1, 64, 8)
>>> output = m(input)
"""
output_size: _size_1_t
def forward(self, input: Tensor) -> Tensor:
return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.
The output is of size :math:`H_{out} \times W_{out}`, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form :math:`H_{out} \times W_{out}`.
Can be a tuple :math:`(H_{out}, W_{out})` or a single :math:`H_{out}` for a
square image :math:`H_{out} \times H_{out}`. :math:`H_{out}` and :math:`W_{out}`
can be either a ``int``, or ``None`` which means the size will be the same as that
of the input.
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to nn.MaxUnpool2d. Default: ``False``
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
:math:`(H_{out}, W_{out})=\text{output\_size}`.
Examples:
>>> # target output size of 5x7
>>> m = nn.AdaptiveMaxPool2d((5,7))
>>> input = torch.randn(1, 64, 8, 9)
>>> output = m(input)
>>> # target output size of 7x7 (square)
>>> m = nn.AdaptiveMaxPool2d(7)
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
>>> # target output size of 10x7
>>> m = nn.AdaptiveMaxPool2d((None, 7))
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
"""
output_size: _size_2_opt_t
def forward(self, input: Tensor):
return F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.
The output is of size :math:`D_{out} \times H_{out} \times W_{out}`, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form :math:`D_{out} \times H_{out} \times W_{out}`.
Can be a tuple :math:`(D_{out}, H_{out}, W_{out})` or a single
:math:`D_{out}` for a cube :math:`D_{out} \times D_{out} \times D_{out}`.
            :math:`D_{out}`, :math:`H_{out}` and :math:`W_{out}` can be either an
``int``, or ``None`` which means the size will be the same as that of the input.
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to nn.MaxUnpool3d. Default: ``False``
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
where :math:`(D_{out}, H_{out}, W_{out})=\text{output\_size}`.
Examples:
>>> # target output size of 5x7x9
>>> m = nn.AdaptiveMaxPool3d((5,7,9))
>>> input = torch.randn(1, 64, 8, 9, 10)
>>> output = m(input)
>>> # target output size of 7x7x7 (cube)
>>> m = nn.AdaptiveMaxPool3d(7)
>>> input = torch.randn(1, 64, 10, 9, 8)
>>> output = m(input)
>>> # target output size of 7x9x8
>>> m = nn.AdaptiveMaxPool3d((7, None, None))
>>> input = torch.randn(1, 64, 10, 9, 8)
>>> output = m(input)
"""
output_size: _size_3_opt_t
def forward(self, input: Tensor):
return F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
class _AdaptiveAvgPoolNd(Module):
__constants__ = ['output_size']
def __init__(self, output_size: _size_any_opt_t) -> None:
super(_AdaptiveAvgPoolNd, self).__init__()
self.output_size = output_size
def extra_repr(self) -> str:
return 'output_size={}'.format(self.output_size)
class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes.
The output size is :math:`L_{out}`, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size :math:`L_{out}`.
Shape:
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
:math:`L_{out}=\text{output\_size}`.
Examples:
>>> # target output size of 5
>>> m = nn.AdaptiveAvgPool1d(5)
>>> input = torch.randn(1, 64, 8)
>>> output = m(input)
"""
output_size: _size_1_t
def forward(self, input: Tensor) -> Tensor:
return F.adaptive_avg_pool1d(input, self.output_size)
class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H.
            H and W can be either an ``int``, or ``None`` which means the size will
be the same as that of the input.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, S_{0}, S_{1})` or :math:`(C, S_{0}, S_{1})`, where
:math:`S=\text{output\_size}`.
Examples:
>>> # target output size of 5x7
>>> m = nn.AdaptiveAvgPool2d((5,7))
>>> input = torch.randn(1, 64, 8, 9)
>>> output = m(input)
>>> # target output size of 7x7 (square)
>>> m = nn.AdaptiveAvgPool2d(7)
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
>>> # target output size of 10x7
>>> m = nn.AdaptiveAvgPool2d((None, 7))
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
"""
output_size: _size_2_opt_t
def forward(self, input: Tensor) -> Tensor:
return F.adaptive_avg_pool2d(input, self.output_size)
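# Common usage note: AdaptiveAvgPool2d(1) (equivalently (1, 1)) performs global
# average pooling, mapping an (N, C, H, W) feature map to (N, C, 1, 1) regardless
# of H and W; this is the typical head before a final Linear classifier.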
class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes.
The output is of size D x H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the form D x H x W.
Can be a tuple (D, H, W) or a single number D for a cube D x D x D.
            D, H and W can be either an ``int``, or ``None`` which means the size will
be the same as that of the input.
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, S_{0}, S_{1}, S_{2})` or :math:`(C, S_{0}, S_{1}, S_{2})`,
where :math:`S=\text{output\_size}`.
Examples:
>>> # target output size of 5x7x9
>>> m = nn.AdaptiveAvgPool3d((5,7,9))
>>> input = torch.randn(1, 64, 8, 9, 10)
>>> output = m(input)
>>> # target output size of 7x7x7 (cube)
>>> m = nn.AdaptiveAvgPool3d(7)
>>> input = torch.randn(1, 64, 10, 9, 8)
>>> output = m(input)
>>> # target output size of 7x9x8
>>> m = nn.AdaptiveAvgPool3d((7, None, None))
>>> input = torch.randn(1, 64, 10, 9, 8)
>>> output = m(input)
"""
output_size: _size_3_opt_t
def forward(self, input: Tensor) -> Tensor:
return F.adaptive_avg_pool3d(input, self.output_size)
| pytorch-master | torch/nn/modules/pooling.py |
from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
Hardsigmoid, Hardswish, SiLU, Mish
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout1d, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d
from .sparse import Embedding, EmbeddingBag
from .rnn import RNNBase, RNN, LSTM, GRU, \
RNNCellBase, RNNCell, LSTMCell, GRUCell
from .pixelshuffle import PixelShuffle, PixelUnshuffle
from .upsampling import UpsamplingNearest2d, UpsamplingBilinear2d, Upsample
from .distance import PairwiseDistance, CosineSimilarity
from .fold import Fold, Unfold
from .adaptive import AdaptiveLogSoftmaxWithLoss
from .transformer import TransformerEncoder, TransformerDecoder, \
TransformerEncoderLayer, TransformerDecoderLayer, Transformer
from .flatten import Flatten, Unflatten
from .channelshuffle import ChannelShuffle
__all__ = [
'Module', 'Identity', 'Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d',
'ConvTranspose2d', 'ConvTranspose3d', 'Threshold', 'ReLU', 'Hardtanh', 'ReLU6',
'Sigmoid', 'Tanh', 'Softmax', 'Softmax2d', 'LogSoftmax', 'ELU', 'SELU', 'CELU', 'GLU', 'GELU', 'Hardshrink',
'LeakyReLU', 'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Softmin',
'Tanhshrink', 'RReLU', 'L1Loss', 'NLLLoss', 'KLDivLoss', 'MSELoss', 'BCELoss', 'BCEWithLogitsLoss',
'NLLLoss2d', 'PoissonNLLLoss', 'CosineEmbeddingLoss', 'CTCLoss', 'HingeEmbeddingLoss', 'MarginRankingLoss',
'MultiLabelMarginLoss', 'MultiLabelSoftMarginLoss', 'MultiMarginLoss', 'SmoothL1Loss', 'GaussianNLLLoss',
'HuberLoss', 'SoftMarginLoss', 'CrossEntropyLoss', 'Container', 'Sequential', 'ModuleList', 'ModuleDict',
'ParameterList', 'ParameterDict', 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'MaxPool1d', 'MaxPool2d',
'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'FractionalMaxPool2d', "FractionalMaxPool3d",
'LPPool1d', 'LPPool2d', 'LocalResponseNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', 'InstanceNorm1d',
'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'GroupNorm', 'SyncBatchNorm',
'Dropout', 'Dropout1d', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout',
'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d', 'ReplicationPad2d', 'ReplicationPad1d', 'ReplicationPad3d',
'CrossMapLRN2d', 'Embedding', 'EmbeddingBag', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell',
'LSTMCell', 'GRUCell', 'PixelShuffle', 'PixelUnshuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d',
'PairwiseDistance', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'TripletMarginLoss', 'ZeroPad2d', 'ConstantPad1d', 'ConstantPad2d',
'ConstantPad3d', 'Bilinear', 'CosineSimilarity', 'Unfold', 'Fold',
'AdaptiveLogSoftmaxWithLoss', 'TransformerEncoder', 'TransformerDecoder',
'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Transformer',
'LazyLinear', 'LazyConv1d', 'LazyConv2d', 'LazyConv3d',
'LazyConvTranspose1d', 'LazyConvTranspose2d', 'LazyConvTranspose3d',
'LazyBatchNorm1d', 'LazyBatchNorm2d', 'LazyBatchNorm3d',
'LazyInstanceNorm1d', 'LazyInstanceNorm2d', 'LazyInstanceNorm3d',
'Flatten', 'Unflatten', 'Hardsigmoid', 'Hardswish', 'SiLU', 'Mish', 'TripletMarginWithDistanceLoss', 'ChannelShuffle'
]
| pytorch-master | torch/nn/modules/__init__.py |
from .module import Module
from .. import functional as F
from torch import Tensor
__all__ = ['PairwiseDistance', 'CosineSimilarity']
class PairwiseDistance(Module):
r"""
Computes the pairwise distance between vectors :math:`v_1`, :math:`v_2` using the p-norm:
.. math ::
\Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}.
Args:
p (real): the norm degree. Default: 2
eps (float, optional): Small value to avoid division by zero.
Default: 1e-6
keepdim (bool, optional): Determines whether or not to keep the vector dimension.
Default: False
Shape:
- Input1: :math:`(N, D)` or :math:`(D)` where `N = batch dimension` and `D = vector dimension`
- Input2: :math:`(N, D)` or :math:`(D)`, same shape as the Input1
- Output: :math:`(N)` or :math:`()` based on input dimension.
If :attr:`keepdim` is ``True``, then :math:`(N, 1)` or :math:`(1)` based on input dimension.
Examples::
>>> pdist = nn.PairwiseDistance(p=2)
>>> input1 = torch.randn(100, 128)
>>> input2 = torch.randn(100, 128)
>>> output = pdist(input1, input2)
"""
__constants__ = ['norm', 'eps', 'keepdim']
norm: float
eps: float
keepdim: bool
def __init__(self, p: float = 2., eps: float = 1e-6, keepdim: bool = False) -> None:
super(PairwiseDistance, self).__init__()
self.norm = p
self.eps = eps
self.keepdim = keepdim
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
class CosineSimilarity(Module):
r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along `dim`.
.. math ::
\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}.
Args:
dim (int, optional): Dimension where cosine similarity is computed. Default: 1
eps (float, optional): Small value to avoid division by zero.
Default: 1e-8
Shape:
- Input1: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`
- Input2: :math:`(\ast_1, D, \ast_2)`, same number of dimensions as x1, matching x1 size at dimension `dim`,
and broadcastable with x1 at other dimensions.
- Output: :math:`(\ast_1, \ast_2)`
Examples::
>>> input1 = torch.randn(100, 128)
>>> input2 = torch.randn(100, 128)
>>> cos = nn.CosineSimilarity(dim=1, eps=1e-6)
>>> output = cos(input1, input2)
"""
__constants__ = ['dim', 'eps']
dim: int
eps: float
def __init__(self, dim: int = 1, eps: float = 1e-8) -> None:
super(CosineSimilarity, self).__init__()
self.dim = dim
self.eps = eps
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return F.cosine_similarity(x1, x2, self.dim, self.eps)
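# Illustrative usage sketch (not part of the original module source): cosine
# similarity along ``dim`` agrees with a manually normalized dot product for
# inputs whose norms are well above ``eps``.
def _example_cosine_similarity():
    import torch
    import torch.nn as nn
    x1 = torch.randn(4, 8)
    x2 = torch.randn(4, 8)
    cos = nn.CosineSimilarity(dim=1)
    manual = (x1 * x2).sum(dim=1) / (x1.norm(dim=1) * x2.norm(dim=1))
    assert torch.allclose(cos(x1, x2), manual, atol=1e-6)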
| pytorch-master | torch/nn/modules/distance.py |
import warnings
from collections import OrderedDict, abc as container_abcs
from itertools import chain, islice
import operator
import torch
from .module import Module
from ..parameter import Parameter
from torch._jit_internal import _copy_to_script_wrapper
from typing import Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union
__all__ = ['Container', 'Sequential', 'ModuleList', 'ModuleDict', 'ParameterList', 'ParameterDict']
T = TypeVar('T', bound=Module)
class Container(Module):
def __init__(self, **kwargs: Any) -> None:
super(Container, self).__init__()
# DeprecationWarning is ignored by default <sigh>
warnings.warn("nn.Container is deprecated. All of it's functionality "
"is now implemented in nn.Module. Subclass that instead.")
for key, value in kwargs.items():
self.add_module(key, value)
class Sequential(Module):
r"""A sequential container.
Modules will be added to it in the order they are passed in the
constructor. Alternatively, an ``OrderedDict`` of modules can be
passed in. The ``forward()`` method of ``Sequential`` accepts any
input and forwards it to the first module it contains. It then
"chains" outputs to inputs sequentially for each subsequent module,
finally returning the output of the last module.
The value a ``Sequential`` provides over manually calling a sequence
of modules is that it allows treating the whole container as a
single module, such that performing a transformation on the
``Sequential`` applies to each of the modules it stores (which are
each a registered submodule of the ``Sequential``).
What's the difference between a ``Sequential`` and a
:class:`torch.nn.ModuleList`? A ``ModuleList`` is exactly what it
sounds like--a list for storing ``Module`` s! On the other hand,
the layers in a ``Sequential`` are connected in a cascading way.
Example::
# Using Sequential to create a small model. When `model` is run,
# input will first be passed to `Conv2d(1,20,5)`. The output of
# `Conv2d(1,20,5)` will be used as the input to the first
# `ReLU`; the output of the first `ReLU` will become the input
# for `Conv2d(20,64,5)`. Finally, the output of
# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
model = nn.Sequential(
nn.Conv2d(1,20,5),
nn.ReLU(),
nn.Conv2d(20,64,5),
nn.ReLU()
)
# Using Sequential with OrderedDict. This is functionally the
# same as the above code
model = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(1,20,5)),
('relu1', nn.ReLU()),
('conv2', nn.Conv2d(20,64,5)),
('relu2', nn.ReLU())
]))
"""
_modules: Dict[str, Module] # type: ignore[assignment]
@overload
def __init__(self, *args: Module) -> None:
...
@overload
def __init__(self, arg: 'OrderedDict[str, Module]') -> None:
...
def __init__(self, *args):
super(Sequential, self).__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def _get_item_by_idx(self, iterator, idx) -> T:
"""Get the idx-th item of the iterator"""
size = len(self)
idx = operator.index(idx)
if not -size <= idx < size:
raise IndexError('index {} is out of range'.format(idx))
idx %= size
return next(islice(iterator, idx, None))
@_copy_to_script_wrapper
def __getitem__(self, idx) -> Union['Sequential', T]:
if isinstance(idx, slice):
return self.__class__(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx: int, module: Module) -> None:
key: str = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx: Union[slice, int]) -> None:
if isinstance(idx, slice):
for key in list(self._modules.keys())[idx]:
delattr(self, key)
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
# To preserve numbering
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
def __add__(self, other) -> 'Sequential':
if isinstance(other, Sequential):
ret = Sequential()
for layer in self:
ret.append(layer)
for layer in other:
ret.append(layer)
return ret
else:
raise ValueError('add operator supports only objects '
'of Sequential class, but {} is given.'.format(
str(type(other))))
def pop(self, key: Union[int, slice]) -> Module:
v = self[key]
del self[key]
return v
def __iadd__(self, other) -> 'Sequential':
if isinstance(other, Sequential):
offset = len(self)
for i, module in enumerate(other):
self.add_module(str(i + offset), module)
return self
else:
raise ValueError('add operator supports only objects '
'of Sequential class, but {} is given.'.format(
str(type(other))))
def __mul__(self, other: int) -> 'Sequential':
if not isinstance(other, int):
raise TypeError(f"unsupported operand type(s) for *: {type(self)} and {type(other)}")
elif (other <= 0):
raise ValueError(f"Non-positive multiplication factor {other} for {type(self)}")
else:
combined = Sequential()
offset = 0
for _ in range(other):
for module in self:
combined.add_module(str(offset), module)
offset += 1
return combined
def __rmul__(self, other: int) -> 'Sequential':
return self.__mul__(other)
def __imul__(self, other: int) -> 'Sequential':
if not isinstance(other, int):
raise TypeError(f"unsupported operand type(s) for *: {type(self)} and {type(other)}")
elif (other <= 0):
raise ValueError(f"Non-positive multiplication factor {other} for {type(self)}")
else:
len_original = len(self)
offset = len(self)
for _ in range(other - 1):
for i in range(len_original):
self.add_module(str(i + offset), self._modules[str(i)])
offset += len_original
return self
@_copy_to_script_wrapper
def __dir__(self):
keys = super(Sequential, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[Module]:
return iter(self._modules.values())
# NB: We can't really type check this function as the type of input
# may change dynamically (as is tested in
# TestScript.test_sequential_intermediary_types). Cannot annotate
# with Any as TorchScript expects a more precise type
def forward(self, input):
for module in self:
input = module(input)
return input
def append(self, module: Module) -> 'Sequential':
r"""Appends a given module to the end.
Args:
module (nn.Module): module to append
"""
self.add_module(str(len(self)), module)
return self
def insert(self, index: int, module: Module) -> 'Sequential':
if not isinstance(module, Module):
raise AssertionError(
'module should be of type: {}'.format(Module))
n = len(self._modules)
if not (-n <= index <= n):
raise IndexError(
'Index out of range: {}'.format(index))
if index < 0:
index += n
for i in range(n, index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
self._modules[str(index)] = module
return self
def extend(self, sequential) -> 'Sequential':
for layer in sequential:
self.append(layer)
return self
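# Illustrative usage sketch (not part of the original module source): Sequential
# stores its children under stringified integer keys, so indexing, slicing and
# append behave like a list while every layer stays a registered submodule.
def _example_sequential_container():
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    assert isinstance(model[0], nn.Linear)   # integer indexing
    assert len(model[1:]) == 2               # slicing returns another Sequential
    model.append(nn.Softmax(dim=-1))         # registered under the key '3'
    assert [name for name, _ in model.named_children()] == ['0', '1', '2', '3']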
class ModuleList(Module):
r"""Holds submodules in a list.
:class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but
modules it contains are properly registered, and will be visible by all
:class:`~torch.nn.Module` methods.
Args:
modules (iterable, optional): an iterable of modules to add
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
def forward(self, x):
# ModuleList can act as an iterable, or be indexed using ints
for i, l in enumerate(self.linears):
x = self.linears[i // 2](x) + l(x)
return x
"""
_modules: Dict[str, Module] # type: ignore[assignment]
def __init__(self, modules: Optional[Iterable[Module]] = None) -> None:
super(ModuleList, self).__init__()
if modules is not None:
self += modules
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules"""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
return str(idx)
@_copy_to_script_wrapper
def __getitem__(self, idx: int) -> Union[Module, 'ModuleList']:
if isinstance(idx, slice):
return self.__class__(list(self._modules.values())[idx])
else:
return self._modules[self._get_abs_string_index(idx)]
def __setitem__(self, idx: int, module: Module) -> None:
idx = self._get_abs_string_index(idx)
return setattr(self, str(idx), module)
def __delitem__(self, idx: Union[int, slice]) -> None:
if isinstance(idx, slice):
for k in range(len(self._modules))[idx]:
delattr(self, str(k))
else:
delattr(self, self._get_abs_string_index(idx))
# To preserve numbering, self._modules is being reconstructed with modules after deletion
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[Module]:
return iter(self._modules.values())
def __iadd__(self, modules: Iterable[Module]) -> 'ModuleList':
return self.extend(modules)
def __add__(self, other: Iterable[Module]) -> 'ModuleList':
combined = ModuleList()
for i, module in enumerate(chain(self, other)):
combined.add_module(str(i), module)
return combined
@_copy_to_script_wrapper
def __dir__(self):
keys = super(ModuleList, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def insert(self, index: int, module: Module) -> None:
r"""Insert a given module before a given index in the list.
Args:
index (int): index to insert.
module (nn.Module): module to insert
"""
for i in range(len(self._modules), index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
self._modules[str(index)] = module
def append(self, module: Module) -> 'ModuleList':
r"""Appends a given module to the end of the list.
Args:
module (nn.Module): module to append
"""
self.add_module(str(len(self)), module)
return self
def pop(self, key: Union[int, slice]) -> Module:
v = self[key]
del self[key]
return v
def extend(self, modules: Iterable[Module]) -> 'ModuleList':
r"""Appends modules from a Python iterable to the end of the list.
Args:
modules (iterable): iterable of modules to append
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError("ModuleList.extend should be called with an "
"iterable, but got " + type(modules).__name__)
offset = len(self)
for i, module in enumerate(modules):
self.add_module(str(offset + i), module)
return self
# remove forward altogether to fall back on Module's _forward_unimplemented
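# Illustrative usage sketch (not part of the original module source): modules
# stored in a ModuleList are registered submodules, so their parameters are
# visible to Module.parameters(); a plain Python list would hide them.
def _example_modulelist_registration():
    import torch.nn as nn

    class WithModuleList(nn.Module):
        def __init__(self):
            super().__init__()
            self.layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])

    class WithPlainList(nn.Module):
        def __init__(self):
            super().__init__()
            self.layers = [nn.Linear(4, 4) for _ in range(3)]  # not registered

    assert len(list(WithModuleList().parameters())) == 6  # 3 x (weight, bias)
    assert len(list(WithPlainList().parameters())) == 0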
class ModuleDict(Module):
r"""Holds submodules in a dictionary.
:class:`~torch.nn.ModuleDict` can be indexed like a regular Python dictionary,
but modules it contains are properly registered, and will be visible by all
:class:`~torch.nn.Module` methods.
:class:`~torch.nn.ModuleDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~torch.nn.ModuleDict.update`, the order of the merged
``OrderedDict``, ``dict`` (started from Python 3.6) or another
:class:`~torch.nn.ModuleDict` (the argument to
:meth:`~torch.nn.ModuleDict.update`).
Note that :meth:`~torch.nn.ModuleDict.update` with other unordered mapping
types (e.g., Python's plain ``dict`` before Python version 3.6) does not
preserve the order of the merged mapping.
Args:
modules (iterable, optional): a mapping (dictionary) of (string: module)
or an iterable of key-value pairs of type (string, module)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.choices = nn.ModuleDict({
'conv': nn.Conv2d(10, 10, 3),
'pool': nn.MaxPool2d(3)
})
self.activations = nn.ModuleDict([
['lrelu', nn.LeakyReLU()],
['prelu', nn.PReLU()]
])
def forward(self, x, choice, act):
x = self.choices[choice](x)
x = self.activations[act](x)
return x
"""
_modules: Dict[str, Module] # type: ignore[assignment]
def __init__(self, modules: Optional[Mapping[str, Module]] = None) -> None:
super(ModuleDict, self).__init__()
if modules is not None:
self.update(modules)
@_copy_to_script_wrapper
def __getitem__(self, key: str) -> Module:
return self._modules[key]
def __setitem__(self, key: str, module: Module) -> None:
self.add_module(key, module)
def __delitem__(self, key: str) -> None:
del self._modules[key]
@_copy_to_script_wrapper
def __len__(self) -> int:
return len(self._modules)
@_copy_to_script_wrapper
def __iter__(self) -> Iterator[str]:
return iter(self._modules)
@_copy_to_script_wrapper
def __contains__(self, key: str) -> bool:
return key in self._modules
def clear(self) -> None:
"""Remove all items from the ModuleDict.
"""
self._modules.clear()
def pop(self, key: str) -> Module:
r"""Remove key from the ModuleDict and return its module.
Args:
key (str): key to pop from the ModuleDict
"""
v = self[key]
del self[key]
return v
@_copy_to_script_wrapper
def keys(self) -> Iterable[str]:
r"""Return an iterable of the ModuleDict keys.
"""
return self._modules.keys()
@_copy_to_script_wrapper
def items(self) -> Iterable[Tuple[str, Module]]:
r"""Return an iterable of the ModuleDict key/value pairs.
"""
return self._modules.items()
@_copy_to_script_wrapper
def values(self) -> Iterable[Module]:
r"""Return an iterable of the ModuleDict values.
"""
return self._modules.values()
def update(self, modules: Mapping[str, Module]) -> None:
r"""Update the :class:`~torch.nn.ModuleDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`modules` is an ``OrderedDict``, a :class:`~torch.nn.ModuleDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Args:
modules (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Module`,
or an iterable of key-value pairs of type (string, :class:`~torch.nn.Module`)
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError("ModuleDict.update should be called with an "
"iterable of key/value pairs, but got " +
type(modules).__name__)
if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
for key, module in modules.items():
self[key] = module
else:
# modules here can be a list with two items
for j, m in enumerate(modules):
if not isinstance(m, container_abcs.Iterable):
raise TypeError("ModuleDict update sequence element "
"#" + str(j) + " should be Iterable; is" +
type(m).__name__)
if not len(m) == 2:
raise ValueError("ModuleDict update sequence element "
"#" + str(j) + " has length " + str(len(m)) +
"; 2 is required")
# modules can be Mapping (what it's typed at), or a list: [(name1, module1), (name2, module2)]
# that's too cumbersome to type correctly with overloads, so we add an ignore here
self[m[0]] = m[1] # type: ignore[assignment]
# remove forward altogether to fall back on Module's _forward_unimplemented
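# Illustrative usage sketch (not part of the original module source): ModuleDict
# preserves insertion order and registers each value, which makes it convenient
# for selecting a branch by name inside forward().
def _example_moduledict_lookup():
    import torch
    import torch.nn as nn
    acts = nn.ModuleDict({'relu': nn.ReLU(), 'tanh': nn.Tanh()})
    acts.update({'gelu': nn.GELU()})
    assert list(acts.keys()) == ['relu', 'tanh', 'gelu']
    x = torch.randn(2, 3)
    assert torch.equal(acts['tanh'](x), torch.tanh(x))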
class ParameterList(Module):
r"""Holds parameters in a list.
:class:`~torch.nn.ParameterList` can be used like a regular Python
list, but Tensors that are :class:`~torch.nn.Parameter` are properly registered,
and will be visible by all :class:`~torch.nn.Module` methods.
Note that the constructor, assigning an element of the list, the
:meth:`~torch.nn.ParameterList.append` method and the :meth:`~torch.nn.ParameterList.extend`
method will convert any :class:`~torch.Tensor` into :class:`~torch.nn.Parameter`.
Args:
parameters (iterable, optional): an iterable of elements to add to the list.
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])
def forward(self, x):
# ParameterList can act as an iterable, or be indexed using ints
for i, p in enumerate(self.params):
x = self.params[i // 2].mm(x) + p.mm(x)
return x
"""
def __init__(self, values: Optional[Iterable[Any]] = None) -> None:
super(ParameterList, self).__init__()
self._size = 0
if values is not None:
self += values
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules"""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
return str(idx)
@overload
def __getitem__(self, idx: int) -> Any:
...
@overload
def __getitem__(self: T, idx: slice) -> T:
...
def __getitem__(self, idx):
if isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
out = self.__class__()
for i in range(start, stop, step):
out.append(self[i])
return out
else:
idx = self._get_abs_string_index(idx)
return getattr(self, str(idx))
def __setitem__(self, idx: int, param: Any) -> None:
# Note that all other function that add an entry to the list part of
# the ParameterList end up here. So this is the only place where we need
# to wrap things into Parameter if needed.
# Objects added via setattr() are not in the list part and thus won't
# call into this function.
idx = self._get_abs_string_index(idx)
if isinstance(param, torch.Tensor) and not isinstance(param, Parameter):
param = Parameter(param)
return setattr(self, str(idx), param)
def __len__(self) -> int:
return self._size
def __iter__(self) -> Iterator[Any]:
return iter(self[i] for i in range(len(self)))
def __iadd__(self, parameters: Iterable[Any]) -> 'ParameterList':
return self.extend(parameters)
def __dir__(self):
keys = super(ParameterList, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def append(self, value: Any) -> 'ParameterList':
"""Appends a given value at the end of the list.
Args:
value (Any): value to append
"""
new_idx = len(self)
self._size += 1
self[new_idx] = value
return self
def extend(self, values: Iterable[Any]) -> 'ParameterList':
"""Appends values from a Python iterable to the end of the list.
Args:
values (iterable): iterable of values to append
"""
# Tensor is an iterable but we never want to unpack it here
if not isinstance(values, container_abcs.Iterable) or isinstance(values, torch.Tensor):
raise TypeError("ParameterList.extend should be called with an "
"iterable, but got " + type(values).__name__)
for value in values:
self.append(value)
return self
def extra_repr(self) -> str:
child_lines = []
for k, p in enumerate(self):
if isinstance(p, torch.Tensor):
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = '{} containing: [{} of size {}{}]'.format(
"Parameter" if isinstance(p, Parameter) else "Tensor",
p.dtype, size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
else:
child_lines.append(' (' + str(k) + '): Object of type: ' + type(p).__name__)
tmpstr = '\n'.join(child_lines)
return tmpstr
def __call__(self, *args, **kwargs):
raise RuntimeError('ParameterList should not be called.')
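# Illustrative usage sketch (not part of the original module source): plain
# tensors placed in a ParameterList are wrapped into nn.Parameter by
# __setitem__ above, so they appear in parameters() and receive gradients.
def _example_parameterlist_wrapping():
    import torch
    import torch.nn as nn
    plist = nn.ParameterList([torch.randn(2, 2)])  # tensor gets wrapped
    plist.append(torch.zeros(3))
    assert all(isinstance(p, nn.Parameter) for p in plist)
    assert len(list(plist.parameters())) == 2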
class ParameterDict(Module):
r"""Holds parameters in a dictionary.
ParameterDict can be indexed like a regular Python dictionary, but Parameters it
contains are properly registered, and will be visible by all Module methods.
Other objects are treated as would be done by a regular Python dictionary
:class:`~torch.nn.ParameterDict` is an **ordered** dictionary.
:meth:`~torch.nn.ParameterDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping. On the other hand, ``OrderedDict`` or another :class:`~torch.nn.ParameterDict`
will preserve their ordering.
Note that the constructor, assigning an element of the dictionary and the
:meth:`~torch.nn.ParameterDict.update` method will convert any :class:`~torch.Tensor` into
:class:`~torch.nn.Parameter`.
Args:
values (iterable, optional): a mapping (dictionary) of
(string : Any) or an iterable of key-value pairs
of type (string, Any)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.params = nn.ParameterDict({
'left': nn.Parameter(torch.randn(5, 10)),
'right': nn.Parameter(torch.randn(5, 10))
})
def forward(self, x, choice):
x = self.params[choice].mm(x)
return x
"""
def __init__(self, parameters: Any = None) -> None:
super(ParameterDict, self).__init__()
self._keys: Dict[str, None] = {}
if parameters is not None:
self.update(parameters)
def _key_to_attr(self, key: str) -> str:
if not isinstance(key, str):
raise TypeError("Index given to ParameterDict cannot be used as a key as it is "
f"not a string (type is '{type(key).__name__}'). Open an issue on "
"github if you need non-string keys.")
else:
# Use the key as-is so that `.named_parameters()` returns the right thing
return key
def __getitem__(self, key: str) -> Any:
attr = self._key_to_attr(key)
return getattr(self, attr)
def __setitem__(self, key: str, value: Any) -> None:
# Note that all other function that add an entry to the dictionary part of
# the ParameterDict end up here. So this is the only place where we need
# to wrap things into Parameter if needed.
# Objects added via setattr() are not in the dictionary part and thus won't
# call into this function.
self._keys[key] = None
attr = self._key_to_attr(key)
if isinstance(value, torch.Tensor) and not isinstance(value, Parameter):
value = Parameter(value)
setattr(self, attr, value)
def __delitem__(self, key: str) -> None:
del self._keys[key]
attr = self._key_to_attr(key)
delattr(self, attr)
def __len__(self) -> int:
return len(self._keys)
def __iter__(self) -> Iterator[str]:
return iter(self._keys)
def __reversed__(self) -> Iterator[str]:
return reversed(list(self._keys))
def copy(self) -> 'ParameterDict':
"""Returns a copy of this :class:`~torch.nn.ParameterDict` instance.
"""
# We have to use an OrderedDict because the ParameterDict constructor
# behaves differently on plain dict vs OrderedDict
return ParameterDict(OrderedDict((k, self[k]) for k in self._keys))
def __contains__(self, key: str) -> bool:
return key in self._keys
def setdefault(self, key: str, default: Optional[Any] = None) -> Any:
"""If key is in the ParameterDict, return its value.
If not, insert `key` with a parameter `default` and return `default`.
`default` defaults to `None`.
Args:
key (str): key to set default for
default (Any): the parameter set to the key
"""
if key not in self:
self[key] = default
return self[key]
def clear(self) -> None:
"""Remove all items from the ParameterDict.
"""
for k in self._keys.copy():
del self[k]
def pop(self, key: str) -> Any:
r"""Remove key from the ParameterDict and return its parameter.
Args:
key (str): key to pop from the ParameterDict
"""
v = self[key]
del self[key]
return v
def popitem(self) -> Tuple[str, Any]:
"""Remove and return the last inserted `(key, parameter)` pair
from the ParameterDict
"""
k, _ = self._keys.popitem()
# We need the key in the _keys to be able to access/del
self._keys[k] = None
val = self[k]
del self[k]
return k, val
def get(self, key: str, default: Optional[Any] = None) -> Any:
r"""Return the parameter associated with key if present.
Otherwise return default if provided, None if not.
Args:
key (str): key to get from the ParameterDict
default (Parameter, optional): value to return if key not present
"""
return self[key] if key in self else default
def fromkeys(self, keys: Iterable[str], default: Optional[Any] = None) -> 'ParameterDict':
r"""Return a new ParameterDict with the keys provided
Args:
keys (iterable, string): keys to make the new ParameterDict from
default (Parameter, optional): value to set for all keys
"""
return ParameterDict(((k, default) for k in keys))
def keys(self) -> Iterable[str]:
r"""Return an iterable of the ParameterDict keys.
"""
return self._keys.keys()
def items(self) -> Iterable[Tuple[str, Any]]:
r"""Return an iterable of the ParameterDict key/value pairs.
"""
return ((k, self[k]) for k in self._keys)
def values(self) -> Iterable[Any]:
r"""Return an iterable of the ParameterDict values.
"""
return (self[k] for k in self._keys)
def update(self, parameters: Union[Mapping[str, Any], 'ParameterDict']) -> None:
r"""Update the :class:`~torch.nn.ParameterDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`parameters` is an ``OrderedDict``, a :class:`~torch.nn.ParameterDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Args:
parameters (iterable): a mapping (dictionary) from string to
:class:`~torch.nn.Parameter`, or an iterable of
key-value pairs of type (string, :class:`~torch.nn.Parameter`)
"""
if not isinstance(parameters, container_abcs.Iterable):
raise TypeError("ParametersDict.update should be called with an "
"iterable of key/value pairs, but got " +
type(parameters).__name__)
if isinstance(parameters, (OrderedDict, ParameterDict)):
for key, parameter in parameters.items():
self[key] = parameter
elif isinstance(parameters, container_abcs.Mapping):
for key, parameter in sorted(parameters.items()):
self[key] = parameter
else:
for j, p in enumerate(parameters):
if not isinstance(p, container_abcs.Iterable):
raise TypeError("ParameterDict update sequence element "
"#" + str(j) + " should be Iterable; is" +
type(p).__name__)
if not len(p) == 2:
raise ValueError("ParameterDict update sequence element "
"#" + str(j) + " has length " + str(len(p)) +
"; 2 is required")
# parameters as length-2 list too cumbersome to type, see ModuleDict.update comment
self[p[0]] = p[1] # type: ignore[assignment]
def extra_repr(self) -> str:
child_lines = []
for k, p in self.items():
if isinstance(p, torch.Tensor):
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = '{} containing: [{} of size {}{}]'.format(
"Parameter" if isinstance(p, Parameter) else "Tensor",
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
else:
child_lines.append(' (' + str(k) + '): Object of type: ' + type(p).__name__)
tmpstr = '\n'.join(child_lines)
return tmpstr
def __call__(self, input):
raise RuntimeError('ParameterDict should not be called.')
def __or__(self, other: 'ParameterDict') -> 'ParameterDict':
copy = self.copy()
copy.update(other)
return copy
def __ror__(self, other: 'ParameterDict') -> 'ParameterDict':
copy = other.copy()
copy.update(self)
return copy
def __ior__(self, other : 'ParameterDict') -> 'ParameterDict':
self.update(other)
return self
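# Illustrative usage sketch (not part of the original module source):
# ParameterDict supports the usual mapping operations, including the ``|``
# merge implemented above, and wraps assigned tensors into nn.Parameter.
def _example_parameterdict_merge():
    import torch
    import torch.nn as nn
    left = nn.ParameterDict({'w': torch.randn(3, 3)})
    right = nn.ParameterDict({'b': torch.zeros(3)})
    merged = left | right
    assert set(merged.keys()) == {'w', 'b'}
    assert isinstance(merged['w'], nn.Parameter)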
| pytorch-master | torch/nn/modules/container.py |
from .module import Module
from .. import functional as F
from torch import Tensor
__all__ = ['PixelShuffle', 'PixelUnshuffle']
class PixelShuffle(Module):
r"""Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
This is useful for implementing efficient sub-pixel convolution
with a stride of :math:`1/r`.
See the paper:
`Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
by Shi et al. (2016) for more details.
Args:
upscale_factor (int): factor to increase spatial resolution by
Shape:
- Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
- Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where
.. math::
C_{out} = C_{in} \div \text{upscale\_factor}^2
.. math::
H_{out} = H_{in} \times \text{upscale\_factor}
.. math::
W_{out} = W_{in} \times \text{upscale\_factor}
Examples::
>>> pixel_shuffle = nn.PixelShuffle(3)
>>> input = torch.randn(1, 9, 4, 4)
>>> output = pixel_shuffle(input)
>>> print(output.size())
torch.Size([1, 1, 12, 12])
.. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
https://arxiv.org/abs/1609.05158
"""
__constants__ = ['upscale_factor']
upscale_factor: int
def __init__(self, upscale_factor: int) -> None:
super(PixelShuffle, self).__init__()
self.upscale_factor = upscale_factor
def forward(self, input: Tensor) -> Tensor:
return F.pixel_shuffle(input, self.upscale_factor)
def extra_repr(self) -> str:
return 'upscale_factor={}'.format(self.upscale_factor)
class PixelUnshuffle(Module):
r"""Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements
in a tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape
:math:`(*, C \times r^2, H, W)`, where r is a downscale factor.
See the paper:
`Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
by Shi et al. (2016) for more details.
Args:
downscale_factor (int): factor to decrease spatial resolution by
Shape:
- Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
- Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where
.. math::
C_{out} = C_{in} \times \text{downscale\_factor}^2
.. math::
H_{out} = H_{in} \div \text{downscale\_factor}
.. math::
W_{out} = W_{in} \div \text{downscale\_factor}
Examples::
>>> pixel_unshuffle = nn.PixelUnshuffle(3)
>>> input = torch.randn(1, 1, 12, 12)
>>> output = pixel_unshuffle(input)
>>> print(output.size())
torch.Size([1, 9, 4, 4])
.. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
https://arxiv.org/abs/1609.05158
"""
__constants__ = ['downscale_factor']
downscale_factor: int
def __init__(self, downscale_factor: int) -> None:
super(PixelUnshuffle, self).__init__()
self.downscale_factor = downscale_factor
def forward(self, input: Tensor) -> Tensor:
return F.pixel_unshuffle(input, self.downscale_factor)
def extra_repr(self) -> str:
return 'downscale_factor={}'.format(self.downscale_factor)
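# Illustrative usage sketch (not part of the original module source):
# PixelUnshuffle inverts PixelShuffle, so applying both with the same factor
# only rearranges values and recovers the original tensor exactly.
def _example_pixel_shuffle_roundtrip():
    import torch
    import torch.nn as nn
    x = torch.randn(1, 9, 4, 4)
    shuffle, unshuffle = nn.PixelShuffle(3), nn.PixelUnshuffle(3)
    y = shuffle(x)                       # shape (1, 1, 12, 12)
    assert torch.equal(unshuffle(y), x)  # exact round trip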
| pytorch-master | torch/nn/modules/pixelshuffle.py |
# -*- coding: utf-8 -*-
from collections import namedtuple
import torch
from torch import Tensor
from typing import List, Sequence
from . import Sequential, ModuleList, Linear
from .module import Module
from ..functional import log_softmax
__all__ = ['AdaptiveLogSoftmaxWithLoss']
_ASMoutput = namedtuple('_ASMoutput', ['output', 'loss'])
class AdaptiveLogSoftmaxWithLoss(Module):
r"""Efficient softmax approximation as described in
`Efficient softmax approximation for GPUs by Edouard Grave, Armand Joulin,
Moustapha Cissé, David Grangier, and Hervé Jégou
<https://arxiv.org/abs/1609.04309>`__.
Adaptive softmax is an approximate strategy for training models with large
output spaces. It is most effective when the label distribution is highly
imbalanced, for example in natural language modelling, where the word
frequency distribution approximately follows `Zipf's law`_.
Adaptive softmax partitions the labels into several clusters, according to
their frequency. These clusters may each contain a different number of
targets.
Additionally, clusters containing less frequent labels assign lower
dimensional embeddings to those labels, which speeds up the computation.
For each minibatch, only clusters for which at least one target is
present are evaluated.
The idea is that the clusters which are accessed frequently
(like the first one, containing most frequent labels), should also be cheap
to compute -- that is, contain a small number of assigned labels.
We highly recommend taking a look at the original paper for more details.
* :attr:`cutoffs` should be an ordered Sequence of integers sorted
in increasing order.
It controls number of clusters and the partitioning of targets into
clusters. For example setting ``cutoffs = [10, 100, 1000]``
means that first `10` targets will be assigned
to the 'head' of the adaptive softmax, targets `11, 12, ..., 100` will be
assigned to the first cluster, and targets `101, 102, ..., 1000` will be
assigned to the second cluster, while targets
`1001, 1002, ..., n_classes - 1` will be assigned
to the last, third cluster.
* :attr:`div_value` is used to compute the size of each additional cluster,
which is given as
:math:`\left\lfloor\frac{\texttt{in\_features}}{\texttt{div\_value}^{idx}}\right\rfloor`,
where :math:`idx` is the cluster index (with clusters
for less frequent words having larger indices,
and indices starting from :math:`1`).
* :attr:`head_bias` if set to True, adds a bias term to the 'head' of the
adaptive softmax. See paper for details. Set to False in the official
implementation.
.. warning::
Labels passed as inputs to this module should be sorted according to
their frequency. This means that the most frequent label should be
represented by the index `0`, and the least frequent
label should be represented by the index `n_classes - 1`.
.. note::
This module returns a ``NamedTuple`` with ``output``
and ``loss`` fields. See further documentation for details.
.. note::
To compute log-probabilities for all classes, the ``log_prob``
method can be used.
Args:
in_features (int): Number of features in the input tensor
n_classes (int): Number of classes in the dataset
cutoffs (Sequence): Cutoffs used to assign targets to their buckets
div_value (float, optional): value used as an exponent to compute sizes
of the clusters. Default: 4.0
head_bias (bool, optional): If ``True``, adds a bias term to the 'head' of the
adaptive softmax. Default: ``False``
Returns:
``NamedTuple`` with ``output`` and ``loss`` fields:
* **output** is a Tensor of size ``N`` containing computed target
log probabilities for each example
* **loss** is a Scalar representing the computed negative
log likelihood loss
Shape:
- input: :math:`(N, \texttt{in\_features})` or :math:`(\texttt{in\_features})`
- target: :math:`(N)` or :math:`()` where each value satisfies :math:`0 \leq \texttt{target[i]} \leq \texttt{n\_classes} - 1`
- output1: :math:`(N)` or :math:`()`
- output2: ``Scalar``
.. _Zipf's law: https://en.wikipedia.org/wiki/Zipf%27s_law
"""
in_features: int
n_classes: int
cutoffs: List[int]
div_value: float
head_bias: bool
head: Linear
tail: ModuleList
def __init__(
self,
in_features: int,
n_classes: int,
cutoffs: Sequence[int],
div_value: float = 4.,
head_bias: bool = False,
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(AdaptiveLogSoftmaxWithLoss, self).__init__()
cutoffs = list(cutoffs)
if (cutoffs != sorted(cutoffs)) \
or (min(cutoffs) <= 0) \
or (max(cutoffs) > (n_classes - 1)) \
or (len(set(cutoffs)) != len(cutoffs)) \
or any([int(c) != c for c in cutoffs]):
raise ValueError("cutoffs should be a sequence of unique, positive "
"integers sorted in an increasing order, where "
"each value is between 1 and n_classes-1")
self.in_features = in_features
self.n_classes = n_classes
self.cutoffs = cutoffs + [n_classes]
self.div_value = div_value
self.head_bias = head_bias
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
self.head = Linear(self.in_features, self.head_size, bias=self.head_bias,
**factory_kwargs)
self.tail = ModuleList()
for i in range(self.n_clusters):
hsz = int(self.in_features // (self.div_value ** (i + 1)))
osz = self.cutoffs[i + 1] - self.cutoffs[i]
projection = Sequential(
Linear(self.in_features, hsz, bias=False, **factory_kwargs),
Linear(hsz, osz, bias=False, **factory_kwargs),
)
self.tail.append(projection)
def reset_parameters(self) -> None:
self.head.reset_parameters()
for i2h, h2o in self.tail:
i2h.reset_parameters()
h2o.reset_parameters()
def forward(self, input_: Tensor, target_: Tensor) -> _ASMoutput:
targ_dim = target_.dim()
if targ_dim == 1:
if input_.size(0) != target_.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if input_.dim() != 2:
raise RuntimeError('1D target tensor expects 2D input tensors, '
'but found inputs with size', input_.size())
elif targ_dim == 0:
if input_.dim() != 1:
raise RuntimeError('0D target tensor expects 1D input tensors, '
'but found inputs with size', input_.size())
else:
raise RuntimeError('0D or 1D target tensor expected, '
'multi-target not supported')
is_batched = targ_dim > 0
input = input_ if is_batched else input_.unsqueeze(0)
target = target_ if is_batched else target_.unsqueeze(0)
used_rows = 0
batch_size = target.size(0)
output = input.new_zeros(batch_size)
gather_inds = target.new_empty(batch_size)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
low_idx = cutoff_values[i]
high_idx = cutoff_values[i + 1]
target_mask = (target >= low_idx) & (target < high_idx)
row_indices = target_mask.nonzero().squeeze()
if row_indices.numel() == 0:
continue
if i == 0:
gather_inds.index_copy_(0, row_indices, target[target_mask])
else:
relative_target = target[target_mask] - low_idx
input_subset = input.index_select(0, row_indices)
cluster_output = self.tail[i - 1](input_subset)
cluster_index = self.shortlist_size + i - 1
gather_inds.index_fill_(0, row_indices, cluster_index)
cluster_logprob = log_softmax(cluster_output, dim=1)
local_logprob = cluster_logprob.gather(1, relative_target.unsqueeze(1))
output.index_copy_(0, row_indices, local_logprob.squeeze(1))
used_rows += row_indices.numel()
if used_rows != batch_size:
raise RuntimeError("Target values should be in [0, {}], "
"but values in range [{}, {}] "
"were found. ".format(self.n_classes - 1,
target.min().item(),
target.max().item()))
head_output = self.head(input)
head_logprob = log_softmax(head_output, dim=1)
output += head_logprob.gather(1, gather_inds.unsqueeze(1)).squeeze()
loss = (-output).mean()
if not is_batched:
output = output.squeeze(0)
return _ASMoutput(output, loss)
def _get_full_log_prob(self, input, head_output):
""" Given input tensor, and output of `self.head`,
compute the log of the full distribution """
out = input.new_empty((head_output.size(0), self.n_classes))
head_logprob = log_softmax(head_output, dim=1)
out[:, :self.shortlist_size] = head_logprob[:, :self.shortlist_size]
for i, (start_idx, stop_idx) in enumerate(zip(self.cutoffs, self.cutoffs[1:])):
cluster_output = self.tail[i](input)
cluster_logprob = log_softmax(cluster_output, dim=1)
output_logprob = cluster_logprob + head_logprob[:, self.shortlist_size + i].unsqueeze(1)
out[:, start_idx:stop_idx] = output_logprob
return out
def log_prob(self, input: Tensor) -> Tensor:
r""" Computes log probabilities for all :math:`\texttt{n\_classes}`
Args:
input (Tensor): a minibatch of examples
Returns:
log-probabilities for each class :math:`c`
in the range :math:`0 \leq c \leq \texttt{n\_classes} - 1`, where :math:`\texttt{n\_classes}` is a
parameter passed to the ``AdaptiveLogSoftmaxWithLoss`` constructor.
Shape:
- Input: :math:`(N, \texttt{in\_features})`
- Output: :math:`(N, \texttt{n\_classes})`
"""
head_output = self.head(input)
return self._get_full_log_prob(input, head_output)
def predict(self, input: Tensor) -> Tensor:
r""" This is equivalent to `self.log_prob(input).argmax(dim=1)`,
but is more efficient in some cases.
Args:
input (Tensor): a minibatch of examples
Returns:
output (Tensor): a class with the highest probability for each example
Shape:
- Input: :math:`(N, \texttt{in\_features})`
- Output: :math:`(N)`
"""
head_output = self.head(input)
output = torch.argmax(head_output, dim=1)
not_in_shortlist = (output >= self.shortlist_size)
all_in_shortlist = not (not_in_shortlist.any())
if all_in_shortlist:
return output
elif not_in_shortlist.all():
log_prob = self._get_full_log_prob(input, head_output)
return torch.argmax(log_prob, dim=1)
else:
log_prob = self._get_full_log_prob(input[not_in_shortlist],
head_output[not_in_shortlist])
output[not_in_shortlist] = torch.argmax(log_prob, dim=1)
return output
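# Illustrative usage sketch (not part of the original module source): a small
# adaptive softmax over 1000 classes with cutoffs [10, 100]; forward() returns
# the (output, loss) NamedTuple and log_prob() covers the full class range.
def _example_adaptive_log_softmax():
    import torch
    import torch.nn as nn
    asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=16, n_classes=1000, cutoffs=[10, 100])
    x = torch.randn(8, 16)
    target = torch.randint(0, 1000, (8,))
    out = asm(x, target)
    assert out.output.shape == (8,)   # per-example target log-probabilities
    assert out.loss.dim() == 0        # scalar negative log likelihood
    assert asm.log_prob(x).shape == (8, 1000)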
| pytorch-master | torch/nn/modules/adaptive.py |
import warnings
from .distance import PairwiseDistance
from .module import Module
from .. import functional as F
from .. import _reduction as _Reduction
from torch import Tensor
from typing import Callable, Optional
__all__ = ['L1Loss', 'NLLLoss', 'NLLLoss2d', 'PoissonNLLLoss', 'GaussianNLLLoss', 'KLDivLoss',
'MSELoss', 'BCELoss', 'BCEWithLogitsLoss', 'HingeEmbeddingLoss', 'MultiLabelMarginLoss',
'SmoothL1Loss', 'HuberLoss', 'SoftMarginLoss', 'CrossEntropyLoss', 'MultiLabelSoftMarginLoss',
'CosineEmbeddingLoss', 'MarginRankingLoss', 'MultiMarginLoss', 'TripletMarginLoss',
'TripletMarginWithDistanceLoss', 'CTCLoss']
class _Loss(Module):
reduction: str
def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(_Loss, self).__init__()
if size_average is not None or reduce is not None:
self.reduction: str = _Reduction.legacy_get_string(size_average, reduce)
else:
self.reduction = reduction
class _WeightedLoss(_Loss):
def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(_WeightedLoss, self).__init__(size_average, reduce, reduction)
self.register_buffer('weight', weight)
self.weight: Optional[Tensor]
class L1Loss(_Loss):
r"""Creates a criterion that measures the mean absolute error (MAE) between each element in
the input :math:`x` and target :math:`y`.
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = \left| x_n - y_n \right|,
where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then:
.. math::
\ell(x, y) =
\begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
:math:`x` and :math:`y` are tensors of arbitrary shapes with a total
of :math:`n` elements each.
The sum operation still operates over all the elements, and divides by :math:`n`.
The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.
Supports real-valued and complex-valued inputs.
Args:
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then
:math:`(*)`, same shape as the input.
Examples::
>>> loss = nn.L1Loss()
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> output = loss(input, target)
>>> output.backward()
"""
__constants__ = ['reduction']
def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(L1Loss, self).__init__(size_average, reduce, reduction)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.l1_loss(input, target, reduction=self.reduction)
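# Illustrative usage sketch (not part of the original module source): the three
# reduction modes of L1Loss relate as mean == sum / numel, and 'none' keeps the
# elementwise shape.
def _example_l1_reductions():
    import torch
    import torch.nn as nn
    x = torch.randn(3, 5)
    y = torch.randn(3, 5)
    per_elem = nn.L1Loss(reduction='none')(x, y)
    assert per_elem.shape == (3, 5)
    total = nn.L1Loss(reduction='sum')(x, y)
    mean = nn.L1Loss(reduction='mean')(x, y)
    assert torch.allclose(mean, total / x.numel())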
class NLLLoss(_WeightedLoss):
r"""The negative log likelihood loss. It is useful to train a classification
problem with `C` classes.
If provided, the optional argument :attr:`weight` should be a 1D Tensor assigning
weight to each of the classes. This is particularly useful when you have an
unbalanced training set.
The `input` given through a forward call is expected to contain
log-probabilities of each class. `input` has to be a Tensor of size either
:math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)`
with :math:`K \geq 1` for the `K`-dimensional case. The latter is useful for
higher dimension inputs, such as computing NLL loss per-pixel for 2D images.
Obtaining log-probabilities in a neural network is easily achieved by
adding a `LogSoftmax` layer in the last layer of your network.
You may use `CrossEntropyLoss` instead, if you prefer not to add an extra
layer.
The `target` that this loss expects should be a class index in the range :math:`[0, C-1]`
where `C = number of classes`; if `ignore_index` is specified, this loss also accepts
this class index (this index may not necessarily be in the class range).
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_{y_n} x_{n,y_n}, \quad
w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore\_index}\},
where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and
:math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then
.. math::
\ell(x, y) = \begin{cases}
\sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, &
\text{if reduction} = \text{`mean';}\\
\sum_{n=1}^N l_n, &
\text{if reduction} = \text{`sum'.}
\end{cases}
Args:
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, it has to be a Tensor of size `C`. Otherwise, it is
treated as if having all ones.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``None``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When
:attr:`size_average` is ``True``, the loss is averaged over
non-ignored targets.
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``None``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
be applied, ``'mean'``: the weighted mean of the output is taken,
``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in
the meantime, specifying either of those two args will override
:attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(N, C)` or :math:`(C)`, where `C = number of classes`, or
:math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
in the case of `K`-dimensional loss.
- Target: :math:`(N)` or :math:`()`, where each value is
:math:`0 \leq \text{targets}[i] \leq C-1`, or
:math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of
K-dimensional loss.
- Output: If :attr:`reduction` is ``'none'``, shape :math:`(N)` or
:math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of K-dimensional loss.
Otherwise, scalar.
Examples::
>>> m = nn.LogSoftmax(dim=1)
>>> loss = nn.NLLLoss()
>>> # input is of size N x C = 3 x 5
>>> input = torch.randn(3, 5, requires_grad=True)
>>> # each element in target has to have 0 <= value < C
>>> target = torch.tensor([1, 0, 4])
>>> output = loss(m(input), target)
>>> output.backward()
>>>
>>>
>>> # 2D loss example (used, for example, with image inputs)
>>> N, C = 5, 4
>>> loss = nn.NLLLoss()
>>> # input is of size N x C x height x width
>>> data = torch.randn(N, 16, 10, 10)
>>> conv = nn.Conv2d(16, C, (3, 3))
>>> m = nn.LogSoftmax(dim=1)
>>> # each element in target has to have 0 <= value < C
>>> target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
>>> output = loss(m(conv(data)), target)
>>> output.backward()
"""
__constants__ = ['ignore_index', 'reduction']
ignore_index: int
def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
reduce=None, reduction: str = 'mean') -> None:
super(NLLLoss, self).__init__(weight, size_average, reduce, reduction)
self.ignore_index = ignore_index
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
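# Illustrative usage sketch (not part of the original module source): as the
# docstring above notes, LogSoftmax followed by NLLLoss on logits is equivalent
# to CrossEntropyLoss applied to the raw logits.
def _example_nll_vs_cross_entropy():
    import torch
    import torch.nn as nn
    logits = torch.randn(3, 5)
    target = torch.tensor([1, 0, 4])
    via_nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), target)
    via_ce = nn.CrossEntropyLoss()(logits, target)
    assert torch.allclose(via_nll, via_ce)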
class NLLLoss2d(NLLLoss):
def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
reduce=None, reduction: str = 'mean') -> None:
warnings.warn("NLLLoss2d has been deprecated. "
"Please use NLLLoss instead as a drop-in replacement and see "
"https://pytorch.org/docs/master/nn.html#torch.nn.NLLLoss for more details.")
super(NLLLoss2d, self).__init__(weight, size_average, ignore_index, reduce, reduction)
class PoissonNLLLoss(_Loss):
r"""Negative log likelihood loss with Poisson distribution of target.
The loss can be described as:
.. math::
\text{target} \sim \mathrm{Poisson}(\text{input})
\text{loss}(\text{input}, \text{target}) = \text{input} - \text{target} * \log(\text{input})
+ \log(\text{target!})
The last term can be omitted or approximated with the Stirling formula. The
approximation is used for target values greater than 1. For targets less than or
equal to 1, zeros are added to the loss.
Args:
log_input (bool, optional): if ``True`` the loss is computed as
:math:`\exp(\text{input}) - \text{target}*\text{input}`, if ``False`` the loss is
:math:`\text{input} - \text{target}*\log(\text{input}+\text{eps})`.
full (bool, optional): whether to compute full loss, i. e. to add the
Stirling approximation term
.. math::
\text{target}*\log(\text{target}) - \text{target} + 0.5 * \log(2\pi\text{target}).
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when
:attr:`log_input = False`. Default: 1e-8
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Examples::
>>> loss = nn.PoissonNLLLoss()
>>> log_input = torch.randn(5, 2, requires_grad=True)
>>> target = torch.randn(5, 2)
>>> output = loss(log_input, target)
>>> output.backward()
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar by default. If :attr:`reduction` is ``'none'``, then :math:`(*)`,
the same shape as the input.
"""
__constants__ = ['log_input', 'full', 'eps', 'reduction']
log_input: bool
full: bool
eps: float
def __init__(self, log_input: bool = True, full: bool = False, size_average=None,
eps: float = 1e-8, reduce=None, reduction: str = 'mean') -> None:
super(PoissonNLLLoss, self).__init__(size_average, reduce, reduction)
self.log_input = log_input
self.full = full
self.eps = eps
def forward(self, log_input: Tensor, target: Tensor) -> Tensor:
return F.poisson_nll_loss(log_input, target, log_input=self.log_input, full=self.full,
eps=self.eps, reduction=self.reduction)
class GaussianNLLLoss(_Loss):
r"""Gaussian negative log likelihood loss.
The targets are treated as samples from Gaussian distributions with
expectations and variances predicted by the neural network. For a
``target`` tensor modelled as having Gaussian distribution with a tensor
of expectations ``input`` and a tensor of positive variances ``var`` the loss is:
.. math::
\text{loss} = \frac{1}{2}\left(\log\left(\text{max}\left(\text{var},
\ \text{eps}\right)\right) + \frac{\left(\text{input} - \text{target}\right)^2}
{\text{max}\left(\text{var}, \ \text{eps}\right)}\right) + \text{const.}
where :attr:`eps` is used for stability. By default, the constant term of
the loss function is omitted unless :attr:`full` is ``True``. If ``var`` is not the same
size as ``input`` (due to a homoscedastic assumption), it must either have a final dimension
of 1 or have one fewer dimension (with all other sizes being the same) for correct broadcasting.
Args:
full (bool, optional): include the constant term in the loss
calculation. Default: ``False``.
eps (float, optional): value used to clamp ``var`` (see note below), for
stability. Default: 1e-6.
reduction (str, optional): specifies the reduction to apply to the
output:``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
will be applied, ``'mean'``: the output is the average of all batch
member losses, ``'sum'``: the output is the sum of all batch member
losses. Default: ``'mean'``.
Shape:
- Input: :math:`(N, *)` or :math:`(*)` where :math:`*` means any number of additional
dimensions
- Target: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input
but with one dimension equal to 1 (to allow for broadcasting)
- Var: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input but
with one dimension equal to 1, or same shape as the input but with one fewer
dimension (to allow for broadcasting)
- Output: scalar if :attr:`reduction` is ``'mean'`` (default) or
``'sum'``. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same
shape as the input
Examples::
>>> loss = nn.GaussianNLLLoss()
>>> input = torch.randn(5, 2, requires_grad=True)
>>> target = torch.randn(5, 2)
>>> var = torch.ones(5, 2, requires_grad=True) #heteroscedastic
>>> output = loss(input, target, var)
>>> output.backward()
>>> loss = nn.GaussianNLLLoss()
>>> input = torch.randn(5, 2, requires_grad=True)
>>> target = torch.randn(5, 2)
>>> var = torch.ones(5, 1, requires_grad=True) #homoscedastic
>>> output = loss(input, target, var)
>>> output.backward()
Note:
The clamping of ``var`` is ignored with respect to autograd, and so the
gradients are unaffected by it.
Reference:
Nix, D. A. and Weigend, A. S., "Estimating the mean and variance of the
target probability distribution", Proceedings of 1994 IEEE International
Conference on Neural Networks (ICNN'94), Orlando, FL, USA, 1994, pp. 55-60
vol.1, doi: 10.1109/ICNN.1994.374138.
"""
__constants__ = ['full', 'eps', 'reduction']
full: bool
eps: float
def __init__(self, *, full: bool = False, eps: float = 1e-6, reduction: str = 'mean') -> None:
super(GaussianNLLLoss, self).__init__(None, None, reduction)
self.full = full
self.eps = eps
def forward(self, input: Tensor, target: Tensor, var: Tensor) -> Tensor:
return F.gaussian_nll_loss(input, target, var, full=self.full, eps=self.eps, reduction=self.reduction)
class KLDivLoss(_Loss):
r"""The Kullback-Leibler divergence loss.
For tensors of the same shape :math:`y_{\text{pred}},\ y_{\text{true}}`,
where :math:`y_{\text{pred}}` is the :attr:`input` and :math:`y_{\text{true}}` is the
:attr:`target`, we define the **pointwise KL-divergence** as
.. math::
L(y_{\text{pred}},\ y_{\text{true}})
= y_{\text{true}} \cdot \log \frac{y_{\text{true}}}{y_{\text{pred}}}
= y_{\text{true}} \cdot (\log y_{\text{true}} - \log y_{\text{pred}})
To avoid underflow issues when computing this quantity, this loss expects the argument
:attr:`input` in the log-space. The argument :attr:`target` may also be provided in the
log-space if :attr:`log_target`\ `= True`.
To summarise, this function is roughly equivalent to computing
.. code-block:: python
if not log_target: # default
loss_pointwise = target * (target.log() - input)
else:
loss_pointwise = target.exp() * (target - input)
and then reducing this result depending on the argument :attr:`reduction` as
.. code-block:: python
if reduction == "mean": # default
loss = loss_pointwise.mean()
elif reduction == "batchmean": # mathematically correct
loss = loss_pointwise.sum() / input.size(0)
elif reduction == "sum":
loss = loss_pointwise.sum()
else: # reduction == "none"
loss = loss_pointwise
.. note::
As with all the other losses in PyTorch, this function expects the first argument,
:attr:`input`, to be the output of the model (e.g. the neural network)
and the second, :attr:`target`, to be the observations in the dataset.
This differs from the standard mathematical notation :math:`KL(P\ ||\ Q)` where
:math:`P` denotes the distribution of the observations and :math:`Q` denotes the model.
.. warning::
:attr:`reduction`\ `= "mean"` doesn't return the true KL divergence value, please use
:attr:`reduction`\ `= "batchmean"` which aligns with the mathematical definition.
In a future release, `"mean"` will be changed to be the same as `"batchmean"`.
Args:
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to `False`, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is `False`. Default: `True`
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is `False`, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: `True`
reduction (str, optional): Specifies the reduction to apply to the output. Default: `"mean"`
log_target (bool, optional): Specifies whether `target` is in the log space. Default: `False`
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar by default. If :attr:`reduction` is `'none'`, then :math:`(*)`,
same shape as the input.
Examples::
>>> kl_loss = nn.KLDivLoss(reduction="batchmean")
>>> # input should be a distribution in the log space
>>> input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
>>> # Sample a batch of distributions. Usually this would come from the dataset
>>> target = F.softmax(torch.rand(3, 5), dim=1)
>>> output = kl_loss(input, target)
>>> kl_loss = nn.KLDivLoss(reduction="batchmean", log_target=True)
>>> log_target = F.log_softmax(torch.rand(3, 5), dim=1)
>>> output = kl_loss(input, log_target)
"""
__constants__ = ['reduction']
def __init__(self, size_average=None, reduce=None, reduction: str = 'mean', log_target: bool = False) -> None:
super(KLDivLoss, self).__init__(size_average, reduce, reduction)
self.log_target = log_target
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.kl_div(input, target, reduction=self.reduction, log_target=self.log_target)
class MSELoss(_Loss):
r"""Creates a criterion that measures the mean squared error (squared L2 norm) between
each element in the input :math:`x` and target :math:`y`.
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = \left( x_n - y_n \right)^2,
where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then:
.. math::
\ell(x, y) =
\begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
:math:`x` and :math:`y` are tensors of arbitrary shapes with a total
of :math:`n` elements each.
The mean operation still operates over all the elements, and divides by :math:`n`.
The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.
Args:
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
Examples::
>>> loss = nn.MSELoss()
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> output = loss(input, target)
>>> output.backward()
"""
__constants__ = ['reduction']
def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(MSELoss, self).__init__(size_average, reduce, reduction)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.mse_loss(input, target, reduction=self.reduction)
class BCELoss(_WeightedLoss):
r"""Creates a criterion that measures the Binary Cross Entropy between the target and
the input probabilities:
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right],
where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
This is used for measuring the error of a reconstruction in, for example,
an auto-encoder. Note that the targets :math:`y` should be numbers
between 0 and 1.
Notice that if :math:`x_n` is either 0 or 1, one of the log terms would be
mathematically undefined in the above loss equation. PyTorch chooses to set
:math:`\log (0) = -\infty`, since :math:`\lim_{x\to 0} \log (x) = -\infty`.
However, an infinite term in the loss equation is not desirable for several reasons.
For one, if either :math:`y_n = 0` or :math:`(1 - y_n) = 0`, then we would be
multiplying 0 with infinity. Secondly, if we have an infinite loss value, then
we would also have an infinite term in our gradient, since
:math:`\lim_{x\to 0} \frac{d}{dx} \log (x) = \infty`.
This would make BCELoss's backward method nonlinear with respect to :math:`x_n`,
and using it for things like linear regression would not be straightforward.
Our solution is that BCELoss clamps its log function outputs to be greater than
or equal to -100. This way, we can always have a finite loss value and a linear
backward method.
Args:
weight (Tensor, optional): a manual rescaling weight given to the loss
of each batch element. If given, has to be a Tensor of size `nbatch`.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
shape as input.
Examples::
>>> m = nn.Sigmoid()
>>> loss = nn.BCELoss()
>>> input = torch.randn(3, requires_grad=True)
>>> target = torch.empty(3).random_(2)
>>> output = loss(m(input), target)
>>> output.backward()
"""
__constants__ = ['reduction']
def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(BCELoss, self).__init__(weight, size_average, reduce, reduction)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
class BCEWithLogitsLoss(_Loss):
r"""This loss combines a `Sigmoid` layer and the `BCELoss` in one single
class. This version is more numerically stable than using a plain `Sigmoid`
followed by a `BCELoss` as, by combining the operations into one layer,
we take advantage of the log-sum-exp trick for numerical stability.
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_n \left[ y_n \cdot \log \sigma(x_n)
+ (1 - y_n) \cdot \log (1 - \sigma(x_n)) \right],
where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
This is used for measuring the error of a reconstruction in, for example,
an auto-encoder. Note that the targets `t[i]` should be numbers
between 0 and 1.
It's possible to trade off recall and precision by adding weights to positive examples.
In the case of multi-label classification the loss can be described as:
.. math::
\ell_c(x, y) = L_c = \{l_{1,c},\dots,l_{N,c}\}^\top, \quad
l_{n,c} = - w_{n,c} \left[ p_c y_{n,c} \cdot \log \sigma(x_{n,c})
+ (1 - y_{n,c}) \cdot \log (1 - \sigma(x_{n,c})) \right],
where :math:`c` is the class number (:math:`c > 1` for multi-label binary classification,
:math:`c = 1` for single-label binary classification),
:math:`n` is the number of the sample in the batch and
:math:`p_c` is the weight of the positive answer for the class :math:`c`.
:math:`p_c > 1` increases the recall, :math:`p_c < 1` increases the precision.
For example, if a dataset contains 100 positive and 300 negative examples of a single class,
then `pos_weight` for the class should be equal to :math:`\frac{300}{100}=3`.
The loss would act as if the dataset contains :math:`3\times 100=300` positive examples.
Examples::
>>> target = torch.ones([10, 64], dtype=torch.float32) # 64 classes, batch size = 10
>>> output = torch.full([10, 64], 1.5) # A prediction (logit)
>>> pos_weight = torch.ones([64]) # All weights are equal to 1
>>> criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
>>> criterion(output, target) # -log(sigmoid(1.5))
tensor(0.20...)
Args:
weight (Tensor, optional): a manual rescaling weight given to the loss
of each batch element. If given, has to be a Tensor of size `nbatch`.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
pos_weight (Tensor, optional): a weight of positive examples.
Must be a vector with length equal to the number of classes.
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
shape as input.
Examples::
>>> loss = nn.BCEWithLogitsLoss()
>>> input = torch.randn(3, requires_grad=True)
>>> target = torch.empty(3).random_(2)
>>> output = loss(input, target)
>>> output.backward()
"""
def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean',
pos_weight: Optional[Tensor] = None) -> None:
super(BCEWithLogitsLoss, self).__init__(size_average, reduce, reduction)
self.register_buffer('weight', weight)
self.register_buffer('pos_weight', pos_weight)
self.weight: Optional[Tensor]
self.pos_weight: Optional[Tensor]
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.binary_cross_entropy_with_logits(input, target,
self.weight,
pos_weight=self.pos_weight,
reduction=self.reduction)
class HingeEmbeddingLoss(_Loss):
r"""Measures the loss given an input tensor :math:`x` and a labels tensor :math:`y`
(containing 1 or -1).
This is usually used for measuring whether two inputs are similar or
dissimilar, e.g. using the L1 pairwise distance as :math:`x`, and is typically
used for learning nonlinear embeddings or semi-supervised learning.
The loss function for :math:`n`-th sample in the mini-batch is
.. math::
l_n = \begin{cases}
x_n, & \text{if}\; y_n = 1,\\
\max \{0, \Delta - x_n\}, & \text{if}\; y_n = -1,
\end{cases}
and the total loss function is
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
where :math:`L = \{l_1,\dots,l_N\}^\top`.
Args:
margin (float, optional): Has a default value of `1`.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(*)` where :math:`*` means any number of dimensions. The sum operation
operates over all the elements.
- Target: :math:`(*)`, same shape as the input
- Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input
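Examples::
>>> # A minimal usage sketch showing the (input, target) call pattern; the
>>> # shapes and values below are arbitrary illustrative choices.
>>> loss = nn.HingeEmbeddingLoss()
>>> input = torch.randn(3, requires_grad=True)
>>> target = torch.tensor([1., -1., 1.])
>>> output = loss(input, target)
>>> output.backward()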
"""
__constants__ = ['margin', 'reduction']
margin: float
def __init__(self, margin: float = 1.0, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(HingeEmbeddingLoss, self).__init__(size_average, reduce, reduction)
self.margin = margin
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.hinge_embedding_loss(input, target, margin=self.margin, reduction=self.reduction)
class MultiLabelMarginLoss(_Loss):
r"""Creates a criterion that optimizes a multi-class multi-classification
hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`)
and output :math:`y` (which is a 2D `Tensor` of target class indices).
For each sample in the mini-batch:
.. math::
\text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)}
where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \
:math:`y \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \
:math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \
and :math:`i \neq y[j]` for all :math:`i` and :math:`j`.
:math:`y` and :math:`x` must have the same size.
The criterion only considers a contiguous block of non-negative targets that
starts at the front.
This allows for different samples to have variable amounts of target classes.
Args:
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(C)` or :math:`(N, C)` where `N` is the batch size and `C`
is the number of classes.
- Target: :math:`(C)` or :math:`(N, C)`, label targets padded by -1 ensuring same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`.
Examples::
>>> loss = nn.MultiLabelMarginLoss()
>>> x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]])
>>> # for target y, only consider labels 3 and 0, not after label -1
>>> y = torch.LongTensor([[3, 0, -1, 1]])
>>> # 0.25 * ((1-(0.1-0.2)) + (1-(0.1-0.4)) + (1-(0.8-0.2)) + (1-(0.8-0.4)))
>>> loss(x, y)
tensor(0.85...)
"""
__constants__ = ['reduction']
def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(MultiLabelMarginLoss, self).__init__(size_average, reduce, reduction)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.multilabel_margin_loss(input, target, reduction=self.reduction)
class SmoothL1Loss(_Loss):
r"""Creates a criterion that uses a squared term if the absolute
element-wise error falls below beta and an L1 term otherwise.
It is less sensitive to outliers than :class:`torch.nn.MSELoss` and in some cases
prevents exploding gradients (e.g. see the paper `Fast R-CNN`_ by Ross Girshick).
For a batch of size :math:`N`, the unreduced loss can be described as:
.. math::
\ell(x, y) = L = \{l_1, ..., l_N\}^T
with
.. math::
l_n = \begin{cases}
0.5 (x_n - y_n)^2 / beta, & \text{if } |x_n - y_n| < beta \\
|x_n - y_n| - 0.5 * beta, & \text{otherwise }
\end{cases}
If `reduction` is not `none`, then:
.. math::
\ell(x, y) =
\begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
.. note::
Smooth L1 loss can be seen as exactly :class:`L1Loss`, but with the :math:`|x - y| < beta`
portion replaced with a quadratic function such that its slope is 1 at :math:`|x - y| = beta`.
The quadratic segment smooths the L1 loss near :math:`|x - y| = 0`.
.. note::
Smooth L1 loss is closely related to :class:`HuberLoss`, being
equivalent to :math:`huber(x, y) / beta` (note that Smooth L1's beta hyper-parameter is
also known as delta for Huber). This leads to the following differences:
* As beta -> 0, Smooth L1 loss converges to :class:`L1Loss`, while :class:`HuberLoss`
converges to a constant 0 loss. When beta is 0, Smooth L1 loss is equivalent to L1 loss.
* As beta -> :math:`+\infty`, Smooth L1 loss converges to a constant 0 loss, while
:class:`HuberLoss` converges to :class:`MSELoss`.
* For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant slope of 1.
For :class:`HuberLoss`, the slope of the L1 segment is beta.
.. _`Fast R-CNN`: https://arxiv.org/abs/1504.08083
Args:
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
beta (float, optional): Specifies the threshold at which to change between L1 and L2 loss.
The value must be non-negative. Default: 1.0
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input.
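Examples::
>>> # A minimal usage sketch; the shapes and the beta value below are arbitrary
>>> # illustrative choices.
>>> loss = nn.SmoothL1Loss(beta=1.0)
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> output = loss(input, target)
>>> output.backward()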
"""
__constants__ = ['reduction']
def __init__(self, size_average=None, reduce=None, reduction: str = 'mean', beta: float = 1.0) -> None:
super(SmoothL1Loss, self).__init__(size_average, reduce, reduction)
self.beta = beta
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.smooth_l1_loss(input, target, reduction=self.reduction, beta=self.beta)
class HuberLoss(_Loss):
r"""Creates a criterion that uses a squared term if the absolute
element-wise error falls below delta and a delta-scaled L1 term otherwise.
This loss combines advantages of both :class:`L1Loss` and :class:`MSELoss`; the
delta-scaled L1 region makes the loss less sensitive to outliers than :class:`MSELoss`,
while the L2 region provides smoothness over :class:`L1Loss` near 0. See
`Huber loss <https://en.wikipedia.org/wiki/Huber_loss>`_ for more information.
For a batch of size :math:`N`, the unreduced loss can be described as:
.. math::
\ell(x, y) = L = \{l_1, ..., l_N\}^T
with
.. math::
l_n = \begin{cases}
0.5 (x_n - y_n)^2, & \text{if } |x_n - y_n| < delta \\
delta * (|x_n - y_n| - 0.5 * delta), & \text{otherwise }
\end{cases}
If `reduction` is not `none`, then:
.. math::
\ell(x, y) =
\begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
.. note::
When delta is set to 1, this loss is equivalent to :class:`SmoothL1Loss`.
In general, this loss differs from :class:`SmoothL1Loss` by a factor of delta (AKA beta
in Smooth L1).
See :class:`SmoothL1Loss` for additional discussion on the differences in behavior
between the two losses.
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
delta (float, optional): Specifies the threshold at which to change between delta-scaled L1 and L2 loss.
The value must be positive. Default: 1.0
Shape:
- Input: :math:`(*)` where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same shape as the input.
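Examples::
>>> # A minimal usage sketch; the shapes and the delta value below are arbitrary
>>> # illustrative choices.
>>> loss = nn.HuberLoss(delta=1.0)
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> output = loss(input, target)
>>> output.backward()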
"""
__constants__ = ['reduction', 'delta']
def __init__(self, reduction: str = 'mean', delta: float = 1.0) -> None:
super().__init__(reduction=reduction)
self.delta = delta
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.huber_loss(input, target, reduction=self.reduction, delta=self.delta)
class SoftMarginLoss(_Loss):
r"""Creates a criterion that optimizes a two-class classification
logistic loss between input tensor :math:`x` and target tensor :math:`y`
(containing 1 or -1).
.. math::
\text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()}
Args:
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Target: :math:`(*)`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same
shape as input.
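Examples::
>>> # A minimal usage sketch; the shapes and the +/-1 labels below are arbitrary
>>> # illustrative choices.
>>> loss = nn.SoftMarginLoss()
>>> input = torch.randn(4, requires_grad=True)
>>> target = torch.tensor([1., -1., 1., -1.])
>>> output = loss(input, target)
>>> output.backward()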
"""
__constants__ = ['reduction']
def __init__(self, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(SoftMarginLoss, self).__init__(size_average, reduce, reduction)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.soft_margin_loss(input, target, reduction=self.reduction)
class CrossEntropyLoss(_WeightedLoss):
r"""This criterion computes the cross entropy loss between input logits
and target.
It is useful when training a classification problem with `C` classes.
If provided, the optional argument :attr:`weight` should be a 1D `Tensor`
assigning weight to each of the classes.
This is particularly useful when you have an unbalanced training set.
The `input` is expected to contain the unnormalized logits for each class (which do `not` need
to be positive or sum to 1, in general).
`input` has to be a Tensor of size :math:`(C)` for unbatched input,
:math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` for the
`K`-dimensional case. The last one is useful for higher-dimensional inputs, such
as computing cross entropy loss per-pixel for 2D images.
The `target` that this criterion expects should contain either:
- Class indices in the range :math:`[0, C)` where :math:`C` is the number of classes; if
`ignore_index` is specified, this loss also accepts this class index (this index
may not necessarily be in the class range). The unreduced (i.e. with :attr:`reduction`
set to ``'none'``) loss for this case can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_{y_n} \log \frac{\exp(x_{n,y_n})}{\sum_{c=1}^C \exp(x_{n,c})}
\cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\}
where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight,
:math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as
:math:`d_1, ..., d_k` for the `K`-dimensional case. If
:attr:`reduction` is not ``'none'`` (default ``'mean'``), then
.. math::
\ell(x, y) = \begin{cases}
\sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n} \cdot \mathbb{1}\{y_n \not= \text{ignore\_index}\}} l_n, &
\text{if reduction} = \text{`mean';}\\
\sum_{n=1}^N l_n, &
\text{if reduction} = \text{`sum'.}
\end{cases}
Note that this case is equivalent to the combination of :class:`~torch.nn.LogSoftmax` and
:class:`~torch.nn.NLLLoss`.
- Probabilities for each class; useful when labels beyond a single class per minibatch item
are required, such as for blended labels, label smoothing, etc. The unreduced (i.e. with
:attr:`reduction` set to ``'none'``) loss for this case can be described as:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - \sum_{c=1}^C w_c \log \frac{\exp(x_{n,c})}{\sum_{i=1}^C \exp(x_{n,i})} y_{n,c}
where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight,
:math:`C` is the number of classes, and :math:`N` spans the minibatch dimension as well as
:math:`d_1, ..., d_k` for the `K`-dimensional case. If
:attr:`reduction` is not ``'none'`` (default ``'mean'``), then
.. math::
\ell(x, y) = \begin{cases}
\frac{\sum_{n=1}^N l_n}{N}, &
\text{if reduction} = \text{`mean';}\\
\sum_{n=1}^N l_n, &
\text{if reduction} = \text{`sum'.}
\end{cases}
.. note::
The performance of this criterion is generally better when `target` contains class
indices, as this allows for optimized computation. Consider providing `target` as
class probabilities only when a single class label per minibatch item is too restrictive.
Args:
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size `C`
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When :attr:`size_average` is
``True``, the loss is averaged over non-ignored targets. Note that
:attr:`ignore_index` is only applicable when the target contains class indices.
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
be applied, ``'mean'``: the weighted mean of the output is taken,
``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in
the meantime, specifying either of those two args will override
:attr:`reduction`. Default: ``'mean'``
label_smoothing (float, optional): A float in [0.0, 1.0]. Specifies the amount
of smoothing when computing the loss, where 0.0 means no smoothing. The targets
become a mixture of the original ground truth and a uniform distribution as described in
`Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.
Shape:
- Input: Shape :math:`(C)`, :math:`(N, C)` or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
in the case of `K`-dimensional loss.
- Target: If containing class indices, shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with
:math:`K \geq 1` in the case of K-dimensional loss where each value should be between :math:`[0, C)`.
If containing class probabilities, same shape as the input and each value should be between :math:`[0, 1]`.
- Output: If reduction is 'none', shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
in the case of K-dimensional loss, depending on the shape of the input. Otherwise, scalar.
where:
.. math::
\begin{aligned}
C ={} & \text{number of classes} \\
N ={} & \text{batch size} \\
\end{aligned}
Examples::
>>> # Example of target with class indices
>>> loss = nn.CrossEntropyLoss()
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.empty(3, dtype=torch.long).random_(5)
>>> output = loss(input, target)
>>> output.backward()
>>>
>>> # Example of target with class probabilities
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5).softmax(dim=1)
>>> output = loss(input, target)
>>> output.backward()
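>>>
>>> # Example with label smoothing (an illustrative addition; the value 0.1 is an
>>> # arbitrary choice, not a recommendation)
>>> loss = nn.CrossEntropyLoss(label_smoothing=0.1)
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.empty(3, dtype=torch.long).random_(5)
>>> output = loss(input, target)
>>> output.backward()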
"""
__constants__ = ['ignore_index', 'reduction', 'label_smoothing']
ignore_index: int
label_smoothing: float
def __init__(self, weight: Optional[Tensor] = None, size_average=None, ignore_index: int = -100,
reduce=None, reduction: str = 'mean', label_smoothing: float = 0.0) -> None:
super(CrossEntropyLoss, self).__init__(weight, size_average, reduce, reduction)
self.ignore_index = ignore_index
self.label_smoothing = label_smoothing
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.cross_entropy(input, target, weight=self.weight,
ignore_index=self.ignore_index, reduction=self.reduction,
label_smoothing=self.label_smoothing)
class MultiLabelSoftMarginLoss(_WeightedLoss):
r"""Creates a criterion that optimizes a multi-label one-versus-all
loss based on max-entropy, between input :math:`x` and target :math:`y` of size
:math:`(N, C)`.
For each sample in the minibatch:
.. math::
loss(x, y) = - \frac{1}{C} * \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1})
+ (1-y[i]) * \log\left(\frac{\exp(-x[i])}{(1 + \exp(-x[i]))}\right)
where :math:`i \in \left\{0, \; \cdots , \; \text{x.nElement}() - 1\right\}`,
:math:`y[i] \in \left\{0, \; 1\right\}`.
Args:
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, it has to be a Tensor of size `C`. Otherwise, it is
treated as if having all ones.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(N, C)` where `N` is the batch size and `C` is the number of classes.
- Target: :math:`(N, C)`, label targets with values in :math:`\{0, 1\}`, same shape as the input.
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`.
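Examples::
>>> # A minimal usage sketch; the batch size, the number of classes and the random
>>> # binary targets below are arbitrary illustrative choices.
>>> loss = nn.MultiLabelSoftMarginLoss()
>>> input = torch.randn(2, 4, requires_grad=True)
>>> target = torch.empty(2, 4).random_(2)
>>> output = loss(input, target)
>>> output.backward()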
"""
__constants__ = ['reduction']
def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(MultiLabelSoftMarginLoss, self).__init__(weight, size_average, reduce, reduction)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.multilabel_soft_margin_loss(input, target, weight=self.weight, reduction=self.reduction)
class CosineEmbeddingLoss(_Loss):
r"""Creates a criterion that measures the loss given input tensors
:math:`x_1`, :math:`x_2` and a `Tensor` label :math:`y` with values 1 or -1.
This is used for measuring whether two inputs are similar or dissimilar,
using the cosine similarity, and is typically used for learning nonlinear
embeddings or semi-supervised learning.
The loss function for each sample is:
.. math::
\text{loss}(x, y) =
\begin{cases}
1 - \cos(x_1, x_2), & \text{if } y = 1 \\
\max(0, \cos(x_1, x_2) - \text{margin}), & \text{if } y = -1
\end{cases}
Args:
margin (float, optional): Should be a number from :math:`-1` to :math:`1`,
:math:`0` to :math:`0.5` is suggested. If :attr:`margin` is missing, the
default value is :math:`0`.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input1: :math:`(N, D)` or :math:`(D)`, where `N` is the batch size and `D` is the embedding dimension.
- Input2: :math:`(N, D)` or :math:`(D)`, same shape as Input1.
- Target: :math:`(N)` or :math:`()`.
- Output: If :attr:`reduction` is ``'none'``, then :math:`(N)`, otherwise scalar.
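Examples::
>>> # A minimal usage sketch; the embedding dimension, batch size and margin below
>>> # are arbitrary illustrative choices.
>>> loss = nn.CosineEmbeddingLoss(margin=0.5)
>>> input1 = torch.randn(3, 128, requires_grad=True)
>>> input2 = torch.randn(3, 128, requires_grad=True)
>>> target = torch.tensor([1., -1., 1.])
>>> output = loss(input1, input2, target)
>>> output.backward()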
"""
__constants__ = ['margin', 'reduction']
margin: float
def __init__(self, margin: float = 0., size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(CosineEmbeddingLoss, self).__init__(size_average, reduce, reduction)
self.margin = margin
def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor:
return F.cosine_embedding_loss(input1, input2, target, margin=self.margin, reduction=self.reduction)
class MarginRankingLoss(_Loss):
r"""Creates a criterion that measures the loss given
inputs :math:`x1`, :math:`x2`, two 1D mini-batch or 0D `Tensors`,
and a label 1D mini-batch or 0D `Tensor` :math:`y` (containing 1 or -1).
If :math:`y = 1` then it is assumed that the first input should be ranked higher
(have a larger value) than the second input, and vice-versa for :math:`y = -1`.
The loss function for each pair of samples in the mini-batch is:
.. math::
\text{loss}(x1, x2, y) = \max(0, -y * (x1 - x2) + \text{margin})
Args:
margin (float, optional): Has a default value of :math:`0`.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input1: :math:`(N)` or :math:`()` where `N` is the batch size.
- Input2: :math:`(N)` or :math:`()`, same shape as the Input1.
- Target: :math:`(N)` or :math:`()`, same shape as the inputs.
- Output: scalar. If :attr:`reduction` is ``'none'`` and Input size is not :math:`()`, then :math:`(N)`.
Examples::
>>> loss = nn.MarginRankingLoss()
>>> input1 = torch.randn(3, requires_grad=True)
>>> input2 = torch.randn(3, requires_grad=True)
>>> target = torch.randn(3).sign()
>>> output = loss(input1, input2, target)
>>> output.backward()
"""
__constants__ = ['margin', 'reduction']
margin: float
def __init__(self, margin: float = 0., size_average=None, reduce=None, reduction: str = 'mean') -> None:
super(MarginRankingLoss, self).__init__(size_average, reduce, reduction)
self.margin = margin
def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor:
return F.margin_ranking_loss(input1, input2, target, margin=self.margin, reduction=self.reduction)
class MultiMarginLoss(_WeightedLoss):
r"""Creates a criterion that optimizes a multi-class classification hinge
loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) and
output :math:`y` (which is a 1D tensor of target class indices,
:math:`0 \leq y \leq \text{x.size}(1)-1`):
For each mini-batch sample, the loss in terms of the 1D input :math:`x` and scalar
output :math:`y` is:
.. math::
\text{loss}(x, y) = \frac{\sum_i \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)}
where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`
and :math:`i \neq y`.
Optionally, you can give non-equal weighting on the classes by passing
a 1D :attr:`weight` tensor into the constructor.
The loss function then becomes:
.. math::
\text{loss}(x, y) = \frac{\sum_i \max(0, w[y] * (\text{margin} - x[y] + x[i]))^p}{\text{x.size}(0)}
Args:
p (int, optional): Has a default value of :math:`1`. :math:`1` and :math:`2`
are the only supported values.
margin (float, optional): Has a default value of :math:`1`.
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, it has to be a Tensor of size `C`. Otherwise, it is
treated as if having all ones.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(N, C)` or :math:`(C)`, where :math:`N` is the batch size and :math:`C` is the number of classes.
- Target: :math:`(N)` or :math:`()`, where each value is :math:`0 \leq \text{targets}[i] \leq C-1`.
- Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the target.
Examples::
>>> loss = nn.MultiMarginLoss()
>>> x = torch.tensor([[0.1, 0.2, 0.4, 0.8]])
>>> y = torch.tensor([3])
>>> # 0.25 * ((1-(0.8-0.1)) + (1-(0.8-0.2)) + (1-(0.8-0.4)))
>>> loss(x, y)
tensor(0.32...)
"""
__constants__ = ['p', 'margin', 'reduction']
margin: float
p: int
def __init__(self, p: int = 1, margin: float = 1., weight: Optional[Tensor] = None, size_average=None,
reduce=None, reduction: str = 'mean') -> None:
super(MultiMarginLoss, self).__init__(weight, size_average, reduce, reduction)
if p != 1 and p != 2:
raise ValueError("only p == 1 and p == 2 supported")
assert weight is None or weight.dim() == 1
self.p = p
self.margin = margin
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return F.multi_margin_loss(input, target, p=self.p, margin=self.margin,
weight=self.weight, reduction=self.reduction)
class TripletMarginLoss(_Loss):
r"""Creates a criterion that measures the triplet loss given an input
tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.
This is used for measuring a relative similarity between samples. A triplet
is composed of `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative
examples` respectively). The shapes of all input tensors should be
:math:`(N, D)`.
The distance swap is described in detail in the paper `Learning shallow
convolutional feature descriptors with triplet losses`_ by
V. Balntas, E. Riba et al.
The loss function for each sample in the mini-batch is:
.. math::
L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}
where
.. math::
d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p
See also :class:`~torch.nn.TripletMarginWithDistanceLoss`, which computes the
triplet margin loss for input tensors using a custom distance function.
Args:
margin (float, optional): Default: :math:`1`.
p (int, optional): The norm degree for pairwise distance. Default: :math:`2`.
swap (bool, optional): The distance swap is described in detail in the paper
`Learning shallow convolutional feature descriptors with triplet losses` by
V. Balntas, E. Riba et al. Default: ``False``.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(N, D)` or :math:`(D)` where :math:`D` is the vector dimension.
- Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'`` and
input shape is :math:`(N, D)`; a scalar otherwise.
Examples::
>>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
>>> anchor = torch.randn(100, 128, requires_grad=True)
>>> positive = torch.randn(100, 128, requires_grad=True)
>>> negative = torch.randn(100, 128, requires_grad=True)
>>> output = triplet_loss(anchor, positive, negative)
>>> output.backward()
.. _Learning shallow convolutional feature descriptors with triplet losses:
http://www.bmva.org/bmvc/2016/papers/paper119/index.html
"""
__constants__ = ['margin', 'p', 'eps', 'swap', 'reduction']
margin: float
p: float
eps: float
swap: bool
def __init__(self, margin: float = 1.0, p: float = 2., eps: float = 1e-6, swap: bool = False, size_average=None,
reduce=None, reduction: str = 'mean'):
super(TripletMarginLoss, self).__init__(size_average, reduce, reduction)
self.margin = margin
self.p = p
self.eps = eps
self.swap = swap
def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor:
return F.triplet_margin_loss(anchor, positive, negative, margin=self.margin, p=self.p,
eps=self.eps, swap=self.swap, reduction=self.reduction)
class TripletMarginWithDistanceLoss(_Loss):
r"""Creates a criterion that measures the triplet loss given input
tensors :math:`a`, :math:`p`, and :math:`n` (representing anchor,
positive, and negative examples, respectively), and a nonnegative,
real-valued function ("distance function") used to compute the relationship
between the anchor and positive example ("positive distance") and the
anchor and negative example ("negative distance").
The unreduced loss (i.e., with :attr:`reduction` set to ``'none'``)
can be described as:
.. math::
\ell(a, p, n) = L = \{l_1,\dots,l_N\}^\top, \quad
l_i = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}
where :math:`N` is the batch size; :math:`d` is a nonnegative, real-valued function
quantifying the closeness of two tensors, referred to as the :attr:`distance_function`;
and :math:`margin` is a nonnegative margin representing the minimum difference
between the positive and negative distances that is required for the loss to
be 0. The input tensors have :math:`N` elements each and can be of any shape
that the distance function can handle.
If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then:
.. math::
\ell(x, y) =
\begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
See also :class:`~torch.nn.TripletMarginLoss`, which computes the triplet
loss for input tensors using the :math:`l_p` distance as the distance function.
Args:
distance_function (Callable, optional): A nonnegative, real-valued function that
quantifies the closeness of two tensors. If not specified,
`nn.PairwiseDistance` will be used. Default: ``None``
margin (float, optional): A nonnegative margin representing the minimum difference
between the positive and negative distances required for the loss to be 0. Larger
margins penalize cases where the negative examples are not distant enough from the
anchors, relative to the positives. Default: :math:`1`.
swap (bool, optional): Whether to use the distance swap described in the paper
`Learning shallow convolutional feature descriptors with triplet losses` by
V. Balntas, E. Riba et al. If True, and if the positive example is closer to the
negative example than the anchor is, swaps the positive example and the anchor in
the loss computation. Default: ``False``.
reduction (str, optional): Specifies the (optional) reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Shape:
- Input: :math:`(N, *)` where :math:`*` represents any number of additional dimensions
as supported by the distance function.
- Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'``, or a scalar
otherwise.
Examples::
>>> # Initialize embeddings
>>> embedding = nn.Embedding(1000, 128)
>>> anchor_ids = torch.randint(0, 1000, (1,))
>>> positive_ids = torch.randint(0, 1000, (1,))
>>> negative_ids = torch.randint(0, 1000, (1,))
>>> anchor = embedding(anchor_ids)
>>> positive = embedding(positive_ids)
>>> negative = embedding(negative_ids)
>>>
>>> # Built-in Distance Function
>>> triplet_loss = \
>>> nn.TripletMarginWithDistanceLoss(distance_function=nn.PairwiseDistance())
>>> output = triplet_loss(anchor, positive, negative)
>>> output.backward()
>>>
>>> # Custom Distance Function
>>> def l_infinity(x1, x2):
>>> return torch.max(torch.abs(x1 - x2), dim=1).values
>>>
>>> # xdoctest: +SKIP("FIXME: Would call backwards a second time")
>>> triplet_loss = (
>>> nn.TripletMarginWithDistanceLoss(distance_function=l_infinity, margin=1.5))
>>> output = triplet_loss(anchor, positive, negative)
>>> output.backward()
>>>
>>> # Custom Distance Function (Lambda)
>>> triplet_loss = (
>>> nn.TripletMarginWithDistanceLoss(
>>> distance_function=lambda x, y: 1.0 - F.cosine_similarity(x, y)))
>>> output = triplet_loss(anchor, positive, negative)
>>> output.backward()
Reference:
V. Balntas, et al.: Learning shallow convolutional feature descriptors with triplet losses:
http://www.bmva.org/bmvc/2016/papers/paper119/index.html
"""
__constants__ = ['margin', 'swap', 'reduction']
margin: float
swap: bool
def __init__(self, *, distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
margin: float = 1.0, swap: bool = False, reduction: str = 'mean'):
super(TripletMarginWithDistanceLoss, self).__init__(size_average=None, reduce=None, reduction=reduction)
self.distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = \
distance_function if distance_function is not None else PairwiseDistance()
self.margin = margin
self.swap = swap
def forward(self, anchor: Tensor, positive: Tensor, negative: Tensor) -> Tensor:
return F.triplet_margin_with_distance_loss(anchor, positive, negative,
distance_function=self.distance_function,
margin=self.margin, swap=self.swap, reduction=self.reduction)
class CTCLoss(_Loss):
r"""The Connectionist Temporal Classification loss.
Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the
probability of possible alignments of input to target, producing a loss value which is differentiable
with respect to each input node. The alignment of input to target is assumed to be "many-to-one", which
limits the length of the target sequence such that it must be :math:`\leq` the input length.
Args:
blank (int, optional): blank label. Default :math:`0`.
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: ``'mean'``
zero_infinity (bool, optional):
Whether to zero infinite losses and the associated gradients.
Default: ``False``
Infinite losses mainly occur when the inputs are too short
to be aligned to the targets.
Shape:
- Log_probs: Tensor of size :math:`(T, N, C)` or :math:`(T, C)`,
where :math:`T = \text{input length}`,
:math:`N = \text{batch size}`, and
:math:`C = \text{number of classes (including blank)}`.
The logarithmized probabilities of the outputs (e.g. obtained with
:func:`torch.nn.functional.log_softmax`).
- Targets: Tensor of size :math:`(N, S)` or
:math:`(\operatorname{sum}(\text{target\_lengths}))`,
where :math:`N = \text{batch size}` and
:math:`S = \text{max target length, if shape is } (N, S)`.
It represents the target sequences. Each element in the target
sequence is a class index, and the target index cannot be blank (default=0).
In the :math:`(N, S)` form, targets are padded to the
length of the longest sequence, and stacked.
In the :math:`(\operatorname{sum}(\text{target\_lengths}))` form,
the targets are assumed to be un-padded and
concatenated within 1 dimension.
- Input_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`,
where :math:`N = \text{batch size}`. It represents the lengths of the
inputs (each must be :math:`\leq T`). The lengths are specified
for each sequence to achieve masking under the assumption that sequences
are padded to equal lengths.
- Target_lengths: Tuple or tensor of size :math:`(N)` or :math:`()`,
where :math:`N = \text{batch size}`. It represents the lengths of the targets.
Lengths are specified for each sequence to achieve masking under the
assumption that sequences are padded to equal lengths. If target shape is
:math:`(N,S)`, target_lengths are effectively the stop index
:math:`s_n` for each target sequence, such that ``target_n = targets[n,0:s_n]`` for
each target in a batch. Lengths must each be :math:`\leq S`.
If the targets are given as a 1d tensor that is the concatenation of individual
targets, the target_lengths must add up to the total length of the tensor.
- Output: scalar. If :attr:`reduction` is ``'none'``, then
:math:`(N)` if input is batched or :math:`()` if input is unbatched, where :math:`N = \text{batch size}`.
Examples::
>>> # Target are to be padded
>>> T = 50 # Input sequence length
>>> C = 20 # Number of classes (including blank)
>>> N = 16 # Batch size
>>> S = 30 # Target sequence length of longest target in batch (padding length)
>>> S_min = 10 # Minimum target length, for demonstration purposes
>>>
>>> # Initialize random batch of input vectors, for *size = (T,N,C)
>>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
>>>
>>> # Initialize random batch of targets (0 = blank, 1:C = classes)
>>> target = torch.randint(low=1, high=C, size=(N, S), dtype=torch.long)
>>>
>>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long)
>>> target_lengths = torch.randint(low=S_min, high=S, size=(N,), dtype=torch.long)
>>> ctc_loss = nn.CTCLoss()
>>> loss = ctc_loss(input, target, input_lengths, target_lengths)
>>> loss.backward()
>>>
>>>
>>> # Target are to be un-padded
>>> T = 50 # Input sequence length
>>> C = 20 # Number of classes (including blank)
>>> N = 16 # Batch size
>>>
>>> # Initialize random batch of input vectors, for *size = (T,N,C)
>>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
>>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long)
>>>
>>> # Initialize random batch of targets (0 = blank, 1:C = classes)
>>> target_lengths = torch.randint(low=1, high=T, size=(N,), dtype=torch.long)
>>> target = torch.randint(low=1, high=C, size=(sum(target_lengths),), dtype=torch.long)
>>> ctc_loss = nn.CTCLoss()
>>> loss = ctc_loss(input, target, input_lengths, target_lengths)
>>> loss.backward()
>>>
>>>
>>> # Target are to be un-padded and unbatched (effectively N=1)
>>> T = 50 # Input sequence length
>>> C = 20 # Number of classes (including blank)
>>>
>>> # Initialize random batch of input vectors, for *size = (T,C)
>>> # xdoctest: +SKIP("FIXME: error in doctest")
>>> input = torch.randn(T, C).log_softmax(1).detach().requires_grad_()
>>> input_lengths = torch.tensor(T, dtype=torch.long)
>>>
>>> # Initialize random batch of targets (0 = blank, 1:C = classes)
>>> target_lengths = torch.randint(low=1, high=T, size=(), dtype=torch.long)
>>> target = torch.randint(low=1, high=C, size=(target_lengths,), dtype=torch.long)
>>> ctc_loss = nn.CTCLoss()
>>> loss = ctc_loss(input, target, input_lengths, target_lengths)
>>> loss.backward()
Reference:
A. Graves et al.: Connectionist Temporal Classification:
Labelling Unsegmented Sequence Data with Recurrent Neural Networks:
https://www.cs.toronto.edu/~graves/icml_2006.pdf
Note:
In order to use CuDNN, the following must be satisfied: :attr:`targets` must be
in concatenated format, all :attr:`input_lengths` must be :math:`T`, :math:`blank=0`,
:attr:`target_lengths` :math:`\leq 256`, and the integer arguments must be of
dtype :attr:`torch.int32`.
The regular implementation uses the (more common in PyTorch) `torch.long` dtype.
Note:
In some circumstances when using the CUDA backend with CuDNN, this operator
may select a nondeterministic algorithm to increase performance. If this is
undesirable, you can try to make the operation deterministic (potentially at
a performance cost) by setting ``torch.backends.cudnn.deterministic =
True``.
Please see the notes on :doc:`/notes/randomness` for background.
"""
__constants__ = ['blank', 'reduction']
blank: int
zero_infinity: bool
def __init__(self, blank: int = 0, reduction: str = 'mean', zero_infinity: bool = False):
super(CTCLoss, self).__init__(reduction=reduction)
self.blank = blank
self.zero_infinity = zero_infinity
def forward(self, log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor) -> Tensor:
return F.ctc_loss(log_probs, targets, input_lengths, target_lengths, self.blank, self.reduction,
self.zero_infinity)
# TODO: L1HingeEmbeddingCriterion
# TODO: MSECriterion weight
# TODO: ClassSimplexCriterion
| pytorch-master | torch/nn/modules/loss.py |
import warnings
from typing import Optional, Tuple
import torch
from torch import Tensor
from .linear import NonDynamicallyQuantizableLinear
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
__all__ = ['Threshold', 'ReLU', 'RReLU', 'Hardtanh', 'ReLU6', 'Sigmoid', 'Hardsigmoid', 'Tanh',
'SiLU', 'Mish', 'Hardswish', 'ELU', 'CELU', 'SELU', 'GLU', 'GELU', 'Hardshrink', 'LeakyReLU',
'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Tanhshrink',
'Softmin', 'Softmax', 'Softmax2d', 'LogSoftmax']
class Threshold(Module):
r"""Thresholds each element of the input Tensor.
Threshold is defined as:
.. math::
y =
\begin{cases}
x, &\text{ if } x > \text{threshold} \\
\text{value}, &\text{ otherwise }
\end{cases}
Args:
threshold: The value to threshold at
value: The value to replace with
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
Examples::
>>> m = nn.Threshold(0.1, 20)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['threshold', 'value', 'inplace']
threshold: float
value: float
inplace: bool
def __init__(self, threshold: float, value: float, inplace: bool = False) -> None:
super(Threshold, self).__init__()
self.threshold = threshold
self.value = value
self.inplace = inplace
# TODO: check in THNN (if inplace == True, then assert value <= threshold)
def forward(self, input: Tensor) -> Tensor:
return F.threshold(input, self.threshold, self.value, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'threshold={}, value={}{}'.format(
self.threshold, self.value, inplace_str
)
class ReLU(Module):
r"""Applies the rectified linear unit function element-wise:
:math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ReLU.png
Examples::
>>> m = nn.ReLU()
>>> input = torch.randn(2)
>>> output = m(input)
An implementation of CReLU - https://arxiv.org/abs/1603.05201
>>> m = nn.ReLU()
>>> input = torch.randn(2).unsqueeze(0)
>>> output = torch.cat((m(input),m(-input)))
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace: bool = False):
super(ReLU, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.relu(input, inplace=self.inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class RReLU(Module):
r"""Applies the randomized leaky rectified liner unit function, element-wise,
as described in the paper:
`Empirical Evaluation of Rectified Activations in Convolutional Network`_.
The function is defined as:
.. math::
\text{RReLU}(x) =
\begin{cases}
x & \text{if } x \geq 0 \\
ax & \text{ otherwise }
\end{cases}
where :math:`a` is randomly sampled from uniform distribution
:math:`\mathcal{U}(\text{lower}, \text{upper})`.
See: https://arxiv.org/pdf/1505.00853.pdf
Args:
lower: lower bound of the uniform distribution. Default: :math:`\frac{1}{8}`
upper: upper bound of the uniform distribution. Default: :math:`\frac{1}{3}`
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/RReLU.png
Examples::
>>> m = nn.RReLU(0.1, 0.3)
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Empirical Evaluation of Rectified Activations in Convolutional Network`:
https://arxiv.org/abs/1505.00853
"""
__constants__ = ['lower', 'upper', 'inplace']
lower: float
upper: float
inplace: bool
def __init__(
self,
lower: float = 1. / 8,
upper: float = 1. / 3,
inplace: bool = False
):
super(RReLU, self).__init__()
self.lower = lower
self.upper = upper
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'lower={}, upper={}{}'.format(self.lower, self.upper, inplace_str)
class Hardtanh(Module):
r"""Applies the HardTanh function element-wise.
HardTanh is defined as:
.. math::
\text{HardTanh}(x) = \begin{cases}
\text{max\_val} & \text{ if } x > \text{ max\_val } \\
\text{min\_val} & \text{ if } x < \text{ min\_val } \\
x & \text{ otherwise } \\
\end{cases}
Args:
min_val: minimum value of the linear region range. Default: -1
max_val: maximum value of the linear region range. Default: 1
inplace: can optionally do the operation in-place. Default: ``False``
Keyword arguments :attr:`min_value` and :attr:`max_value`
have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardtanh.png
Examples::
>>> m = nn.Hardtanh(-2, 2)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['min_val', 'max_val', 'inplace']
min_val: float
max_val: float
inplace: bool
def __init__(
self,
min_val: float = -1.,
max_val: float = 1.,
inplace: bool = False,
min_value: Optional[float] = None,
max_value: Optional[float] = None
) -> None:
super(Hardtanh, self).__init__()
if min_value is not None:
warnings.warn("keyword argument min_value is deprecated and rename to min_val")
min_val = min_value
if max_value is not None:
warnings.warn("keyword argument max_value is deprecated and rename to max_val")
max_val = max_value
self.min_val = min_val
self.max_val = max_val
self.inplace = inplace
assert self.max_val > self.min_val
def forward(self, input: Tensor) -> Tensor:
return F.hardtanh(input, self.min_val, self.max_val, self.inplace)
def extra_repr(self) -> str:
inplace_str = ', inplace=True' if self.inplace else ''
return 'min_val={}, max_val={}{}'.format(
self.min_val, self.max_val, inplace_str
)
class ReLU6(Hardtanh):
r"""Applies the element-wise function:
.. math::
\text{ReLU6}(x) = \min(\max(0,x), 6)
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.ReLU6()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, inplace: bool = False):
super(ReLU6, self).__init__(0., 6., inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class Sigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Sigmoid.png
Examples::
>>> m = nn.Sigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return torch.sigmoid(input)
class Hardsigmoid(Module):
r"""Applies the Hardsigmoid function element-wise.
Hardsigmoid is defined as:
.. math::
\text{Hardsigmoid}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
1 & \text{if~} x \ge +3, \\
x / 6 + 1 / 2 & \text{otherwise}
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardsigmoid.png
Examples::
>>> m = nn.Hardsigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace : bool = False) -> None:
super(Hardsigmoid, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.hardsigmoid(input, self.inplace)
class Tanh(Module):
r"""Applies the Hyperbolic Tangent (Tanh) function element-wise.
Tanh is defined as:
.. math::
\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)} {\exp(x) + \exp(-x)}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Tanh.png
Examples::
>>> m = nn.Tanh()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return torch.tanh(input)
class SiLU(Module):
r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
The SiLU function is also known as the swish function.
.. math::
\text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.}
.. note::
See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_
where the SiLU (Sigmoid Linear Unit) was originally coined, and see
`Sigmoid-Weighted Linear Units for Neural Network Function Approximation
in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:
a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_
where the SiLU was experimented with later.
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/SiLU.png
Examples::
>>> m = nn.SiLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace: bool = False):
super(SiLU, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.silu(input, inplace=self.inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class Mish(Module):
r"""Applies the Mish function, element-wise.
Mish: A Self Regularized Non-Monotonic Neural Activation Function.
.. math::
\text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x))
.. note::
See `Mish: A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Mish.png
Examples::
>>> m = nn.Mish()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace: bool = False):
super(Mish, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.mish(input, inplace=self.inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class Hardswish(Module):
r"""Applies the Hardswish function, element-wise, as described in the paper:
`Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`_.
Hardswish is defined as:
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
x & \text{if~} x \ge +3, \\
x \cdot (x + 3) /6 & \text{otherwise}
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardswish.png
Examples::
>>> m = nn.Hardswish()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace : bool = False) -> None:
super(Hardswish, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.hardswish(input, self.inplace)
class ELU(Module):
r"""Applies the Exponential Linear Unit (ELU) function, element-wise, as described
in the paper: `Fast and Accurate Deep Network Learning by Exponential Linear
Units (ELUs) <https://arxiv.org/abs/1511.07289>`__.
ELU is defined as:
.. math::
\text{ELU}(x) = \begin{cases}
x, & \text{ if } x > 0\\
\alpha * (\exp(x) - 1), & \text{ if } x \leq 0
\end{cases}
Args:
alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/ELU.png
Examples::
>>> m = nn.ELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['alpha', 'inplace']
alpha: float
inplace: bool
def __init__(self, alpha: float = 1., inplace: bool = False) -> None:
super(ELU, self).__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.elu(input, self.alpha, self.inplace)
def extra_repr(self) -> str:
inplace_str = ', inplace=True' if self.inplace else ''
return 'alpha={}{}'.format(self.alpha, inplace_str)
class CELU(Module):
r"""Applies the element-wise function:
.. math::
\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
More details can be found in the paper `Continuously Differentiable Exponential Linear Units`_ .
Args:
alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/CELU.png
Examples::
>>> m = nn.CELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Continuously Differentiable Exponential Linear Units`:
https://arxiv.org/abs/1704.07483
"""
__constants__ = ['alpha', 'inplace']
alpha: float
inplace: bool
def __init__(self, alpha: float = 1., inplace: bool = False) -> None:
super(CELU, self).__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.celu(input, self.alpha, self.inplace)
def extra_repr(self) -> str:
inplace_str = ', inplace=True' if self.inplace else ''
return 'alpha={}{}'.format(self.alpha, inplace_str)
class SELU(Module):
r"""Applied element-wise, as:
.. math::
\text{SELU}(x) = \text{scale} * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))
with :math:`\alpha = 1.6732632423543772848170429916717` and
:math:`\text{scale} = 1.0507009873554804934193349852946`.
.. warning::
When using ``kaiming_normal`` or ``kaiming_normal_`` for initialisation,
``nonlinearity='linear'`` should be used instead of ``nonlinearity='selu'``
in order to get `Self-Normalizing Neural Networks`_.
See :func:`torch.nn.init.calculate_gain` for more information.
More details can be found in the paper `Self-Normalizing Neural Networks`_ .
Args:
inplace (bool, optional): can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/SELU.png
Examples::
>>> m = nn.SELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
"""
__constants__ = ['inplace']
inplace: bool
def __init__(self, inplace: bool = False) -> None:
super(SELU, self).__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.selu(input, self.inplace)
def extra_repr(self) -> str:
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class GLU(Module):
r"""Applies the gated linear unit function
:math:`\text{GLU}(a, b) = a \otimes \sigma(b)`, where :math:`a` is the first half
of the input matrices and :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
Shape:
- Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
dimensions
- Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
Examples::
>>> m = nn.GLU()
>>> input = torch.randn(4, 2)
>>> output = m(input)
"""
__constants__ = ['dim']
dim: int
def __init__(self, dim: int = -1) -> None:
super(GLU, self).__init__()
self.dim = dim
def forward(self, input: Tensor) -> Tensor:
return F.glu(input, self.dim)
def extra_repr(self) -> str:
return 'dim={}'.format(self.dim)
class GELU(Module):
r"""Applies the Gaussian Error Linear Units function:
.. math:: \text{GELU}(x) = x * \Phi(x)
where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
When the approximate argument is ``'tanh'``, GELU is estimated with:
.. math:: \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))
Args:
approximate (str, optional): the gelu approximation algorithm to use:
``'none'`` | ``'tanh'``. Default: ``'none'``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/GELU.png
Examples::
>>> m = nn.GELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['approximate']
approximate: str
def __init__(self, approximate: str = 'none') -> None:
super(GELU, self).__init__()
self.approximate = approximate
def forward(self, input: Tensor) -> Tensor:
return F.gelu(input, approximate=self.approximate)
def extra_repr(self) -> str:
return 'approximate={}'.format(self.approximate)
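# Illustrative sketch (not part of the original file): a hedged comparison of
# the exact (erf-based) GELU against the 'tanh' estimate described in the
# docstring above. The helper name `_demo_gelu_approximate_tanh` is introduced
# here purely for illustration.
def _demo_gelu_approximate_tanh():
    x = torch.randn(8)
    exact = GELU()(x)                     # default approximate='none'
    approx = GELU(approximate='tanh')(x)  # tanh-based estimate
    return (exact - approx).abs().max()   # typically a very small value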
class Hardshrink(Module):
r"""Applies the Hard Shrinkage (Hardshrink) function element-wise.
Hardshrink is defined as:
.. math::
\text{HardShrink}(x) =
\begin{cases}
x, & \text{ if } x > \lambda \\
x, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` value for the Hardshrink formulation. Default: 0.5
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardshrink.png
Examples::
>>> m = nn.Hardshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['lambd']
lambd: float
def __init__(self, lambd: float = 0.5) -> None:
super(Hardshrink, self).__init__()
self.lambd = lambd
def forward(self, input: Tensor) -> Tensor:
return F.hardshrink(input, self.lambd)
def extra_repr(self) -> str:
return '{}'.format(self.lambd)
class LeakyReLU(Module):
r"""Applies the element-wise function:
.. math::
\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)
or
.. math::
\text{LeakyReLU}(x) =
\begin{cases}
x, & \text{ if } x \geq 0 \\
\text{negative\_slope} \times x, & \text{ otherwise }
\end{cases}
Args:
negative_slope: Controls the angle of the negative slope. Default: 1e-2
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
.. image:: ../scripts/activation_images/LeakyReLU.png
Examples::
>>> m = nn.LeakyReLU(0.1)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace', 'negative_slope']
inplace: bool
negative_slope: float
def __init__(self, negative_slope: float = 1e-2, inplace: bool = False) -> None:
super(LeakyReLU, self).__init__()
self.negative_slope = negative_slope
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
return F.leaky_relu(input, self.negative_slope, self.inplace)
def extra_repr(self) -> str:
inplace_str = ', inplace=True' if self.inplace else ''
return 'negative_slope={}{}'.format(self.negative_slope, inplace_str)
class LogSigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/LogSigmoid.png
Examples::
>>> m = nn.LogSigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.logsigmoid(input)
class Softplus(Module):
r"""Applies the Softplus function :math:`\text{Softplus}(x) = \frac{1}{\beta} *
\log(1 + \exp(\beta * x))` element-wise.
SoftPlus is a smooth approximation to the ReLU function and can be used
to constrain the output of a machine to always be positive.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
Args:
beta: the :math:`\beta` value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softplus.png
Examples::
>>> m = nn.Softplus()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['beta', 'threshold']
beta: int
threshold: int
def __init__(self, beta: int = 1, threshold: int = 20) -> None:
super(Softplus, self).__init__()
self.beta = beta
self.threshold = threshold
def forward(self, input: Tensor) -> Tensor:
return F.softplus(input, self.beta, self.threshold)
def extra_repr(self) -> str:
return 'beta={}, threshold={}'.format(self.beta, self.threshold)
class Softshrink(Module):
r"""Applies the soft shrinkage function elementwise:
.. math::
\text{SoftShrinkage}(x) =
\begin{cases}
x - \lambda, & \text{ if } x > \lambda \\
x + \lambda, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` (must be no less than zero) value for the Softshrink formulation. Default: 0.5
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softshrink.png
Examples::
>>> m = nn.Softshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['lambd']
lambd: float
def __init__(self, lambd: float = 0.5) -> None:
super(Softshrink, self).__init__()
self.lambd = lambd
def forward(self, input: Tensor) -> Tensor:
return F.softshrink(input, self.lambd)
def extra_repr(self) -> str:
return str(self.lambd)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces as described in the paper:
`Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
Multi-Head Attention is defined as:
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
``forward()`` will use a special optimized implementation if all of the following
conditions are met:
- self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor. This
restriction will be loosened in the future.)
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
- training is disabled (using ``.eval()``)
- dropout is 0
- ``add_bias_kv`` is ``False``
- ``add_zero_attn`` is ``False``
- ``batch_first`` is ``True`` and the input is batched
- ``kdim`` and ``vdim`` are equal to ``embed_dim``
- at most one of ``key_padding_mask`` or ``attn_mask`` is passed
- if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
nor ``attn_mask`` is passed
If the optimized implementation is in use, a
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
``query``/``key``/``value`` to represent padding more efficiently than using a
padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
will be returned, and an additional speedup proportional to the fraction of the input
that is padding can be expected.
Args:
embed_dim: Total dimension of the model.
num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
bias: If specified, adds bias to input / output projection layers. Default: ``True``.
add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
Default: ``False``.
kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Examples::
>>> # xdoctest: +SKIP
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if not self._qkv_same_embed_dim:
self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))
self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim), **factory_kwargs))
self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = NonDynamicallyQuantizableLinear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
if add_bias_kv:
self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query: Tensor, key: Tensor, value: Tensor, key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True, attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
:math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
Queries are compared against key-value pairs to produce the output.
See "Attention Is All You Need" for more details.
key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
:math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
See "Attention Is All You Need" for more details.
value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
See "Attention Is All You Need" for more details.
key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
Binary and byte masks are supported.
For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
the purpose of attention. For a byte mask, a non-zero value indicates that the corresponding ``key``
value will be ignored.
need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
Default: ``True``.
attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
:math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
:math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
corresponding position is not allowed to attend. For a float mask, the mask values will be added to
the attention weight.
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
Outputs:
- **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
:math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
embedding dimension ``embed_dim``.
- **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
:math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
.. note::
`batch_first` argument is ignored for unbatched inputs.
"""
is_batched = query.dim() == 3
why_not_fast_path = ''
if not is_batched:
why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
elif query is not key or key is not value:
# When lifting this restriction, don't forget to either
# enforce that the dtypes all match or test cases where
# they don't!
why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
elif self.in_proj_weight is not None and query.dtype != self.in_proj_weight.dtype:
# this case will fail anyway, but at least they'll get a useful error message.
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
elif self.training:
why_not_fast_path = "training is enabled"
elif not self.batch_first:
why_not_fast_path = "batch_first was not True"
elif self.bias_k is not None:
why_not_fast_path = "self.bias_k was not None"
elif self.bias_v is not None:
why_not_fast_path = "self.bias_v was not None"
elif self.dropout:
why_not_fast_path = f"dropout was {self.dropout}, required zero"
elif self.add_zero_attn:
why_not_fast_path = "add_zero_attn was enabled"
elif not self._qkv_same_embed_dim:
why_not_fast_path = "_qkv_same_embed_dim was not True"
elif attn_mask is not None:
why_not_fast_path = "attn_mask was not None"
elif query.is_nested and key_padding_mask is not None:
why_not_fast_path = "key_padding_mask is not supported with NestedTensor input"
if not why_not_fast_path:
tensor_args = (
query,
key,
value,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
if torch.overrides.has_torch_function(tensor_args):
why_not_fast_path = "some Tensor argument has_torch_function"
elif not all([(x.is_cuda or 'cpu' in str(x.device)) for x in tensor_args]):
why_not_fast_path = "some Tensor argument is neither CUDA nor CPU"
elif torch.is_grad_enabled() and any([x.requires_grad for x in tensor_args]):
why_not_fast_path = ("grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad")
if not why_not_fast_path:
return torch._native_multi_head_attention(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
key_padding_mask if key_padding_mask is not None else attn_mask,
need_weights,
average_attn_weights,
1 if key_padding_mask is not None else 0 if attn_mask is not None else None)
any_nested = query.is_nested or key.is_nested or value.is_nested
assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
f"The fast path was not hit because {why_not_fast_path}")
if self.batch_first and is_batched:
# make sure that the transpose op does not affect the "is" property
if key is value:
if query is key:
query = key = value = query.transpose(1, 0)
else:
query, key = [x.transpose(1, 0) for x in (query, key)]
value = key
else:
query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
if not self._qkv_same_embed_dim:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, average_attn_weights=average_attn_weights)
else:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, average_attn_weights=average_attn_weights)
if self.batch_first and is_batched:
return attn_output.transpose(1, 0), attn_output_weights
else:
return attn_output, attn_output_weights
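# Illustrative sketch (not part of the original file): a minimal setup that
# satisfies the fast-path conditions listed in the MultiheadAttention docstring
# above (self-attention, eval mode, batch_first=True, zero dropout, no
# add_bias_kv/add_zero_attn, no masks, autograd disabled). The helper name
# `_demo_multihead_attention_fast_path` is introduced purely for illustration.
def _demo_multihead_attention_fast_path():
    mha = MultiheadAttention(embed_dim=16, num_heads=4, batch_first=True).eval()
    x = torch.randn(2, 5, 16)  # (batch, seq, embed_dim)
    with torch.no_grad():
        # query, key and value are the same tensor, i.e. self-attention
        attn_output, attn_weights = mha(x, x, x)
    return attn_output.shape  # expected to be torch.Size([2, 5, 16])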
class PReLU(Module):
r"""Applies the element-wise function:
.. math::
\text{PReLU}(x) = \max(0,x) + a * \min(0,x)
or
.. math::
\text{PReLU}(x) =
\begin{cases}
x, & \text{ if } x \geq 0 \\
ax, & \text{ otherwise }
\end{cases}
Here :math:`a` is a learnable parameter. When called without arguments, `nn.PReLU()` uses a single
parameter :math:`a` across all input channels. If called with `nn.PReLU(nChannels)`,
a separate :math:`a` is used for each input channel.
.. note::
weight decay should not be used when learning :math:`a` for good performance.
.. note::
Channel dim is the 2nd dim of input. When input has dims < 2, then there is
no channel dim and the number of channels = 1.
Args:
num_parameters (int): number of :math:`a` to learn.
Although it takes an int as input, only two values are legitimate:
1, or the number of channels of the input. Default: 1
init (float): the initial value of :math:`a`. Default: 0.25
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions.
- Output: :math:`(*)`, same shape as the input.
Attributes:
weight (Tensor): the learnable weights of shape (:attr:`num_parameters`).
.. image:: ../scripts/activation_images/PReLU.png
Examples::
>>> m = nn.PReLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['num_parameters']
num_parameters: int
def __init__(self, num_parameters: int = 1, init: float = 0.25,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
self.num_parameters = num_parameters
super(PReLU, self).__init__()
self.weight = Parameter(torch.empty(num_parameters, **factory_kwargs).fill_(init))
def forward(self, input: Tensor) -> Tensor:
return F.prelu(input, self.weight)
def extra_repr(self) -> str:
return 'num_parameters={}'.format(self.num_parameters)
class Softsign(Module):
r"""Applies the element-wise function:
.. math::
\text{SoftSign}(x) = \frac{x}{ 1 + |x|}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softsign.png
Examples::
>>> m = nn.Softsign()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.softsign(input)
class Tanhshrink(Module):
r"""Applies the element-wise function:
.. math::
\text{Tanhshrink}(x) = x - \tanh(x)
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Tanhshrink.png
Examples::
>>> m = nn.Tanhshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
return F.tanhshrink(input)
class Softmin(Module):
r"""Applies the Softmin function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range `[0, 1]` and sum to 1.
Softmin is defined as:
.. math::
\text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Args:
dim (int): A dimension along which Softmin will be computed (so every slice
along dim will sum to 1).
Returns:
a Tensor of the same dimension and shape as the input, with
values in the range [0, 1]
Examples::
>>> m = nn.Softmin()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ['dim']
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super(Softmin, self).__init__()
self.dim = dim
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
return F.softmin(input, self.dim, _stacklevel=5)
def extra_repr(self):
return 'dim={dim}'.format(dim=self.dim)
class Softmax(Module):
r"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
When the input Tensor is a sparse tensor, then the unspecified
values are treated as ``-inf``.
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Args:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
.. note::
This module doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use `LogSoftmax` instead (it's faster and has better numerical properties).
Examples::
>>> m = nn.Softmax(dim=1)
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ['dim']
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super(Softmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
return F.softmax(input, self.dim, _stacklevel=5)
def extra_repr(self) -> str:
return 'dim={dim}'.format(dim=self.dim)
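# Illustrative sketch (not part of the original file): the note in the Softmax
# docstring above recommends LogSoftmax when pairing with NLLLoss. A hedged
# check that log-softmax followed by NLL matches cross-entropy on the same
# logits; `_demo_log_softmax_with_nll` is introduced purely for illustration.
def _demo_log_softmax_with_nll():
    logits = torch.randn(4, 3)
    target = torch.tensor([0, 2, 1, 0])
    nll = F.nll_loss(F.log_softmax(logits, dim=1), target)
    ce = F.cross_entropy(logits, target)  # log_softmax + nll_loss in one call
    return torch.allclose(nll, ce)        # expected to be True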
class Softmax2d(Module):
r"""Applies SoftMax over features to each spatial location.
When given an image of ``Channels x Height x Width``, it will
apply `Softmax` to each location :math:`(Channels, h_i, w_j)`
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`.
- Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Examples::
>>> m = nn.Softmax2d()
>>> # you softmax over the 2nd dimension
>>> input = torch.randn(2, 3, 12, 13)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
assert input.dim() == 4 or input.dim() == 3, 'Softmax2d requires a 3D or 4D tensor as input'
return F.softmax(input, -3, _stacklevel=5)
class LogSoftmax(Module):
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional
input Tensor. The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Args:
dim (int): A dimension along which LogSoftmax will be computed.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
Examples::
>>> m = nn.LogSoftmax()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ['dim']
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super(LogSoftmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
return F.log_softmax(input, self.dim, _stacklevel=5)
def extra_repr(self):
return 'dim={dim}'.format(dim=self.dim)
| pytorch-master | torch/nn/modules/activation.py |
import collections
from itertools import repeat
from typing import List, Dict, Any
__all__ = ['consume_prefix_in_state_dict_if_present']
def _ntuple(n, name="parse"):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return tuple(x)
return tuple(repeat(x, n))
parse.__name__ = name
return parse
_single = _ntuple(1, "_single")
_pair = _ntuple(2, "_pair")
_triple = _ntuple(3, "_triple")
_quadruple = _ntuple(4, "_quadruple")
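# Illustrative sketch (not part of the original file): how the _ntuple helpers
# above normalize scalar vs. iterable arguments (the way Conv/Pool modules turn
# ``kernel_size=3`` into ``(3, 3)``). `_demo_ntuple_helpers` is introduced
# purely for illustration; the expected values are derived from the code above.
def _demo_ntuple_helpers():
    assert _single(3) == (3,)
    assert _pair(3) == (3, 3)
    assert _pair((1, 2)) == (1, 2)   # iterables are passed through as tuples
    assert _triple(5) == (5, 5, 5)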
def _reverse_repeat_tuple(t, n):
r"""Reverse the order of `t` and repeat each element for `n` times.
This can be used to translate padding arg used by Conv and Pooling modules
to the ones used by `F.pad`.
"""
return tuple(x for x in reversed(t) for _ in range(n))
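# Illustrative sketch (not part of the original file): a hedged example of the
# padding translation performed by `_reverse_repeat_tuple`, e.g. a per-dimension
# Conv padding of (1, 2) becoming the last-dim-first ordering expected by
# `F.pad`. `_demo_reverse_repeat_tuple` is introduced purely for illustration.
def _demo_reverse_repeat_tuple():
    assert _reverse_repeat_tuple((1, 2), 2) == (2, 2, 1, 1)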
def _list_with_default(out_size: List[int], defaults: List[int]) -> List[int]:
if isinstance(out_size, int):
return out_size
if len(defaults) <= len(out_size):
raise ValueError(
"Input dimension should be at least {}".format(len(out_size) + 1)
)
return [
v if v is not None else d for v, d in zip(out_size, defaults[-len(out_size) :])
]
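# Illustrative sketch (not part of the original file): `_list_with_default` fills
# ``None`` entries of an output-size spec with the trailing entries of a default
# size (typically the input's shape). `_demo_list_with_default` is introduced
# purely for illustration.
def _demo_list_with_default():
    # e.g. an adaptive-pool style output size over a (N, C, H, W) = (1, 3, 32, 32) input
    assert _list_with_default([None, 7], [1, 3, 32, 32]) == [32, 7]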
def consume_prefix_in_state_dict_if_present(
state_dict: Dict[str, Any], prefix: str
) -> None:
r"""Strip the prefix in state_dict in place, if any.
.. note::
Given a `state_dict` from a DP/DDP model, a local model can load it by applying
`consume_prefix_in_state_dict_if_present(state_dict, "module.")` before calling
:meth:`torch.nn.Module.load_state_dict`.
Args:
state_dict (OrderedDict): a state-dict to be loaded to the model.
prefix (str): prefix.
"""
keys = sorted(state_dict.keys())
for key in keys:
if key.startswith(prefix):
newkey = key[len(prefix) :]
state_dict[newkey] = state_dict.pop(key)
# also strip the prefix in metadata if any.
if "_metadata" in state_dict:
metadata = state_dict["_metadata"]
for key in list(metadata.keys()):
# for the metadata dict, the key can be:
# '': for the DDP module, which we want to remove.
# 'module': for the actual model.
# 'module.xx.xx': for the rest.
if len(key) == 0:
continue
newkey = key[len(prefix) :]
metadata[newkey] = metadata.pop(key)
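# Illustrative sketch (not part of the original file): stripping the ``"module."``
# prefix that DataParallel/DistributedDataParallel adds to parameter names, so a
# plain model can load the state dict. `_demo_consume_prefix` is introduced purely
# for illustration; toy integers stand in for real tensors.
def _demo_consume_prefix():
    state_dict = {"module.fc.weight": 0, "module.fc.bias": 1}
    consume_prefix_in_state_dict_if_present(state_dict, "module.")
    assert sorted(state_dict.keys()) == ["fc.bias", "fc.weight"]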
| pytorch-master | torch/nn/modules/utils.py |
import copy
from typing import Optional, Any, Union, Callable
import torch
from torch import Tensor
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
__all__ = ['Transformer', 'TransformerEncoder', 'TransformerDecoder', 'TransformerEncoderLayer', 'TransformerDecoderLayer']
class Transformer(Module):
r"""A transformer model. User is able to modify the attributes as needed. The architecture
is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
Processing Systems, pages 6000-6010.
Args:
d_model: the number of expected features in the encoder/decoder inputs (default=512).
nhead: the number of heads in the multiheadattention models (default=8).
num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of encoder/decoder intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
custom_encoder: custom encoder (default=None).
custom_decoder: custom decoder (default=None).
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
other attention and feedforward operations, otherwise after. Default: ``False`` (after).
Examples::
>>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
>>> src = torch.rand((10, 32, 512))
>>> tgt = torch.rand((20, 32, 512))
>>> out = transformer_model(src, tgt)
Note: A full example to apply nn.Transformer module for the word language model is available in
https://github.com/pytorch/examples/tree/master/word_language_model
"""
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
custom_encoder: Optional[Any] = None, custom_decoder: Optional[Any] = None,
layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(Transformer, self).__init__()
if custom_encoder is not None:
self.encoder = custom_encoder
else:
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
activation, layer_norm_eps, batch_first, norm_first,
**factory_kwargs)
encoder_norm = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
if custom_decoder is not None:
self.decoder = custom_decoder
else:
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
activation, layer_norm_eps, batch_first, norm_first,
**factory_kwargs)
decoder_norm = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
self.batch_first = batch_first
def forward(self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Take in and process masked source/target sequences.
Args:
src: the sequence to the encoder (required).
tgt: the sequence to the decoder (required).
src_mask: the additive mask for the src sequence (optional).
tgt_mask: the additive mask for the tgt sequence (optional).
memory_mask: the additive mask for the encoder output (optional).
src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch (optional).
memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional).
Shape:
- src: :math:`(S, E)` for unbatched input, :math:`(S, N, E)` if `batch_first=False` or
`(N, S, E)` if `batch_first=True`.
- tgt: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
`(N, T, E)` if `batch_first=True`.
- src_mask: :math:`(S, S)` or :math:`(N\cdot\text{num\_heads}, S, S)`.
- tgt_mask: :math:`(T, T)` or :math:`(N\cdot\text{num\_heads}, T, T)`.
- memory_mask: :math:`(T, S)`.
- src_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.
- tgt_key_padding_mask: :math:`(T)` for unbatched input otherwise :math:`(N, T)`.
- memory_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.
Note: [src/tgt/memory]_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
[src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by
the attention. If a ByteTensor is provided, the non-zero positions will be ignored while the zero
positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- output: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
`(N, T, E)` if `batch_first=True`.
        Note: Due to the multi-head attention architecture in the transformer model,
            the output sequence length of a transformer is the same as the input sequence
            (i.e. target) length of the decoder.
        where S is the source sequence length, T is the target sequence length, N is the
            batch size, and E is the feature dimension.
Examples:
>>> # xdoctest: +SKIP
>>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
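            >>> # illustrative: a causal target mask built with the helper defined below
            >>> tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(0))
            >>> output = transformer_model(src, tgt, tgt_mask=tgt_mask)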
"""
is_batched = src.dim() == 3
if not self.batch_first and src.size(1) != tgt.size(1) and is_batched:
raise RuntimeError("the batch number of src and tgt must be equal")
elif self.batch_first and src.size(0) != tgt.size(0) and is_batched:
raise RuntimeError("the batch number of src and tgt must be equal")
if src.size(-1) != self.d_model or tgt.size(-1) != self.d_model:
raise RuntimeError("the feature number of src and tgt must be equal to d_model")
memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
return output
@staticmethod
def generate_square_subsequent_mask(sz: int) -> Tensor:
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
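        Example (an illustrative call; the values follow directly from the description above)::
            >>> # xdoctest: +IGNORE_WANT("printing format may vary")
            >>> nn.Transformer.generate_square_subsequent_mask(3)
            tensor([[0., -inf, -inf],
                    [0., 0., -inf],
                    [0., 0., 0.]])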
"""
return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
class TransformerEncoder(Module):
r"""TransformerEncoder is a stack of N encoder layers. Users can build the
BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
        enable_nested_tensor: if ``True``, the input will automatically be converted to a nested
            tensor (and converted back on output). This improves the overall performance of
            TransformerEncoder when the padding rate is high. Default: ``True`` (enabled).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
__constants__ = ['norm']
def __init__(self, encoder_layer, num_layers, norm=None, enable_nested_tensor=True, mask_check=True):
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.enable_nested_tensor = enable_nested_tensor
self.mask_check = mask_check
def forward(self, src: Tensor, mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
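        Example (an illustrative padded batch; ``True`` entries in the mask mark padding)::
            >>> # xdoctest: +SKIP
            >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
            >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
            >>> src = torch.rand(10, 32, 512)                         # (S, N, E)
            >>> padding_mask = torch.zeros(32, 10, dtype=torch.bool)  # (N, S)
            >>> out = transformer_encoder(src, src_key_padding_mask=padding_mask)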
"""
output = src
convert_to_nested = False
first_layer = self.layers[0]
src_key_padding_mask_for_layers = src_key_padding_mask
why_not_sparsity_fast_path = ''
str_first_layer = "self.layers[0]"
if not isinstance(first_layer, torch.nn.TransformerEncoderLayer):
why_not_sparsity_fast_path = f"{str_first_layer} was not TransformerEncoderLayer"
elif first_layer.norm_first :
why_not_sparsity_fast_path = f"{str_first_layer}.norm_first was True"
elif first_layer.training:
why_not_sparsity_fast_path = f"{str_first_layer} was in training mode"
elif not first_layer.self_attn.batch_first:
why_not_sparsity_fast_path = f" {str_first_layer}.self_attn.batch_first was not True"
elif not first_layer.self_attn._qkv_same_embed_dim:
why_not_sparsity_fast_path = f"{str_first_layer}.self_attn._qkv_same_embed_dim was not True"
elif not first_layer.activation_relu_or_gelu:
why_not_sparsity_fast_path = f" {str_first_layer}.activation_relu_or_gelu was not True"
elif not (first_layer.norm1.eps == first_layer.norm2.eps) :
why_not_sparsity_fast_path = f"{str_first_layer}.norm1.eps was not equal to {str_first_layer}.norm2.eps"
elif not src.dim() == 3:
why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
elif not self.enable_nested_tensor:
why_not_sparsity_fast_path = "enable_nested_tensor was not True"
elif src_key_padding_mask is None:
why_not_sparsity_fast_path = "src_key_padding_mask was None"
elif (((not hasattr(self, "mask_check")) or self.mask_check)
and not torch._nested_tensor_from_mask_left_aligned(src, src_key_padding_mask.logical_not())):
why_not_sparsity_fast_path = "mask_check enabled, and src and src_key_padding_mask was not left aligned"
elif output.is_nested:
why_not_sparsity_fast_path = "NestedTensor input is not supported"
elif mask is not None:
why_not_sparsity_fast_path = "src_key_padding_mask and mask were both supplied"
if not why_not_sparsity_fast_path:
tensor_args = (
src,
first_layer.self_attn.in_proj_weight,
first_layer.self_attn.in_proj_bias,
first_layer.self_attn.out_proj.weight,
first_layer.self_attn.out_proj.bias,
first_layer.norm1.weight,
first_layer.norm1.bias,
first_layer.norm2.weight,
first_layer.norm2.bias,
first_layer.linear1.weight,
first_layer.linear1.bias,
first_layer.linear2.weight,
first_layer.linear2.bias,
)
if torch.overrides.has_torch_function(tensor_args):
why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
elif not (src.is_cuda or 'cpu' in str(src.device)):
why_not_sparsity_fast_path = "src is neither CUDA nor CPU"
elif torch.is_grad_enabled() and any([x.requires_grad for x in tensor_args]):
why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad")
if (not why_not_sparsity_fast_path) and (src_key_padding_mask is not None):
convert_to_nested = True
            # simplify on or after 8/16/2022 to unconditionally call with mask_check=False
# we have established that either (1) the mask is OK with the check above,
# or (2) that we don't need a mask check with mask_check=False in the init
if not torch.jit.is_scripting():
output = torch._nested_tensor_from_mask(output, src_key_padding_mask.logical_not(), mask_check=False)
else:
# When scripting, make a simpler call until the FC bar passes on 8/16/2022
output = torch._nested_tensor_from_mask(output, src_key_padding_mask.logical_not())
src_key_padding_mask_for_layers = None
for mod in self.layers:
output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask_for_layers)
if convert_to_nested:
output = output.to_padded_tensor(0.)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(Module):
r"""TransformerDecoder is a stack of N decoder layers
Args:
decoder_layer: an instance of the TransformerDecoderLayer() class (required).
num_layers: the number of sub-decoder-layers in the decoder (required).
norm: the layer normalization component (optional).
Examples::
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = transformer_decoder(tgt, memory)
"""
__constants__ = ['norm']
def __init__(self, decoder_layer, num_layers, norm=None):
super(TransformerDecoder, self).__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the inputs (and mask) through the decoder layer in turn.
Args:
tgt: the sequence to the decoder (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = tgt
for mod in self.layers:
output = mod(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerEncoderLayer(Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
norm_first: if ``True``, layer norm is done prior to attention and feedforward
operations, respectively. Otherwise it's done after. Default: ``False`` (after).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
Alternatively, when ``batch_first`` is ``True``:
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
>>> src = torch.rand(32, 10, 512)
>>> out = encoder_layer(src)
Fast path:
forward() will use a special optimized implementation if all of the following
conditions are met:
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor
argument ``requires_grad``
- training is disabled (using ``.eval()``)
- batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``)
        - activation is one of: ``"relu"``, ``"gelu"``, ``torch.nn.functional.relu``, or ``torch.nn.functional.gelu``
- at most one of ``src_mask`` and ``src_key_padding_mask`` is passed
- if src is a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_, neither ``src_mask``
nor ``src_key_padding_mask`` is passed
- the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case
unless the caller has manually modified one without modifying the other)
If the optimized implementation is in use, a
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be
passed for ``src`` to represent padding more efficiently than using a padding
mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ will be
returned, and an additional speedup proportional to the fraction of the input that
is padding can be expected.
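    Fast path example (a sketch of the documented conditions above; whether the fast path is
    actually taken can still depend on the build and the inputs)::
        >>> # xdoctest: +SKIP
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
        >>> encoder_layer = encoder_layer.eval()   # training disabled
        >>> src = torch.rand(32, 10, 512)          # batched (N, S, E) input
        >>> with torch.no_grad():                  # autograd disabled
        ...     out = encoder_layer(src)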
"""
__constants__ = ['batch_first', 'norm_first']
def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
**factory_kwargs)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model, **factory_kwargs)
self.norm_first = norm_first
self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
# Legacy string support for activation function.
if isinstance(activation, str):
activation = _get_activation_fn(activation)
# We can't test self.activation in forward() in TorchScript,
# so stash some information about it instead.
if activation is F.relu or isinstance(activation, torch.nn.ReLU):
self.activation_relu_or_gelu = 1
elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
self.activation_relu_or_gelu = 2
else:
self.activation_relu_or_gelu = 0
self.activation = activation
def __setstate__(self, state):
super(TransformerEncoderLayer, self).__setstate__(state)
if not hasattr(self, 'activation'):
self.activation = F.relu
def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
why_not_sparsity_fast_path = ''
if not src.dim() == 3:
why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
elif self.training:
why_not_sparsity_fast_path = "training is enabled"
elif not self.self_attn.batch_first :
why_not_sparsity_fast_path = "self_attn.batch_first was not True"
elif not self.self_attn._qkv_same_embed_dim :
why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True"
elif not self.activation_relu_or_gelu:
why_not_sparsity_fast_path = "activation_relu_or_gelu was not True"
elif not (self.norm1.eps == self.norm2.eps):
why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps"
elif src_mask is not None:
why_not_sparsity_fast_path = "src_mask is not supported for fastpath"
elif src.is_nested and src_key_padding_mask is not None:
why_not_sparsity_fast_path = "src_key_padding_mask is not supported with NestedTensor input for fastpath"
if not why_not_sparsity_fast_path:
tensor_args = (
src,
self.self_attn.in_proj_weight,
self.self_attn.in_proj_bias,
self.self_attn.out_proj.weight,
self.self_attn.out_proj.bias,
self.norm1.weight,
self.norm1.bias,
self.norm2.weight,
self.norm2.bias,
self.linear1.weight,
self.linear1.bias,
self.linear2.weight,
self.linear2.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
if torch.overrides.has_torch_function(tensor_args):
why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
elif not all([(x.is_cuda or 'cpu' in str(x.device)) for x in tensor_args]):
why_not_sparsity_fast_path = "some Tensor argument is neither CUDA nor CPU"
elif torch.is_grad_enabled() and any([x.requires_grad for x in tensor_args]):
why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad")
if not why_not_sparsity_fast_path:
return torch._transformer_encoder_layer_fwd(
src,
self.self_attn.embed_dim,
self.self_attn.num_heads,
self.self_attn.in_proj_weight,
self.self_attn.in_proj_bias,
self.self_attn.out_proj.weight,
self.self_attn.out_proj.bias,
self.activation_relu_or_gelu == 2,
self.norm_first,
self.norm1.eps,
self.norm1.weight,
self.norm1.bias,
self.norm2.weight,
self.norm2.bias,
self.linear1.weight,
self.linear1.bias,
self.linear2.weight,
self.linear2.bias,
                # TODO: merge src_mask and src_key_padding_mask into a single 4-dim mask
src_mask if src_mask is not None else src_key_padding_mask,
1 if src_key_padding_mask is not None else
0 if src_mask is not None else
None,
)
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x: Tensor,
attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.self_attn(x, x, x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
class TransformerDecoderLayer(Module):
r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
This standard decoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
norm_first: if ``True``, layer norm is done prior to self attention, multihead
attention and feedforward operations, respectively. Otherwise it's done after.
Default: ``False`` (after).
Examples::
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = decoder_layer(tgt, memory)
Alternatively, when ``batch_first`` is ``True``:
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)
>>> memory = torch.rand(32, 10, 512)
>>> tgt = torch.rand(32, 20, 512)
>>> out = decoder_layer(tgt, memory)
"""
__constants__ = ['batch_first', 'norm_first']
def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
**factory_kwargs)
self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
**factory_kwargs)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model, **factory_kwargs)
self.norm_first = norm_first
self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm3 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.dropout3 = Dropout(dropout)
# Legacy string support for activation function.
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerDecoderLayer, self).__setstate__(state)
def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
x = tgt
if self.norm_first:
x = x + self._sa_block(self.norm1(x), tgt_mask, tgt_key_padding_mask)
x = x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask)
x = x + self._ff_block(self.norm3(x))
else:
x = self.norm1(x + self._sa_block(x, tgt_mask, tgt_key_padding_mask))
x = self.norm2(x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask))
x = self.norm3(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x: Tensor,
attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.self_attn(x, x, x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False)[0]
return self.dropout1(x)
# multihead attention block
def _mha_block(self, x: Tensor, mem: Tensor,
attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.multihead_attn(x, mem, mem,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False)[0]
return self.dropout2(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout3(x)
def _get_clones(module, N):
return ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]:
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
| pytorch-master | torch/nn/modules/transformer.py |
from typing import Optional
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
from .. import init
__all__ = ['Embedding', 'EmbeddingBag']
class Embedding(Module):
r"""A simple lookup table that stores embeddings of a fixed dictionary and size.
This module is often used to store word embeddings and retrieve them using indices.
The input to the module is a list of indices, and the output is the corresponding
word embeddings.
Args:
num_embeddings (int): size of the dictionary of embeddings
embedding_dim (int): the size of each embedding vector
padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
i.e. it remains as a fixed "pad". For a newly constructed Embedding,
the embedding vector at :attr:`padding_idx` will default to all zeros,
but can be updated to another value to be used as the padding vector.
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor.
See Notes for more details regarding sparse gradients.
Attributes:
weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
initialized from :math:`\mathcal{N}(0, 1)`
Shape:
- Input: :math:`(*)`, IntTensor or LongTensor of arbitrary shape containing the indices to extract
- Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`
.. note::
Keep in mind that only a limited number of optimizers support
sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`),
:class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`)
.. note::
When :attr:`max_norm` is not ``None``, :class:`Embedding`'s forward method will modify the
:attr:`weight` tensor in-place. Since tensors needed for gradient computations cannot be
modified in-place, performing a differentiable operation on ``Embedding.weight`` before
calling :class:`Embedding`'s forward method requires cloning ``Embedding.weight`` when
:attr:`max_norm` is not ``None``. For example::
n, d, m = 3, 5, 7
            embedding = nn.Embedding(n, d, max_norm=1.0)
W = torch.randn((m, d), requires_grad=True)
idx = torch.tensor([1, 2])
a = embedding.weight.clone() @ W.t() # weight must be cloned for this to be differentiable
b = embedding(idx) @ W.t() # modifies weight in-place
out = (a.unsqueeze(0) + b.unsqueeze(1))
loss = out.sigmoid().prod()
loss.backward()
Examples::
>>> # an Embedding module containing 10 tensors of size 3
>>> embedding = nn.Embedding(10, 3)
>>> # a batch of 2 samples of 4 indices each
>>> input = torch.LongTensor([[1,2,4,5],[4,3,2,9]])
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> embedding(input)
tensor([[[-0.0251, -1.6902, 0.7172],
[-0.6431, 0.0748, 0.6969],
[ 1.4970, 1.3448, -0.9685],
[-0.3677, -2.7265, -0.1685]],
[[ 1.4970, 1.3448, -0.9685],
[ 0.4362, -0.4004, 0.9400],
[-0.6431, 0.0748, 0.6969],
[ 0.9124, -2.3616, 1.1151]]])
>>> # example with padding_idx
>>> embedding = nn.Embedding(10, 3, padding_idx=0)
>>> input = torch.LongTensor([[0,2,0,5]])
>>> embedding(input)
tensor([[[ 0.0000, 0.0000, 0.0000],
[ 0.1535, -2.0309, 0.9315],
[ 0.0000, 0.0000, 0.0000],
[-0.1655, 0.9897, 0.0635]]])
>>> # example of changing `pad` vector
>>> padding_idx = 0
>>> embedding = nn.Embedding(3, 3, padding_idx=padding_idx)
>>> embedding.weight
Parameter containing:
tensor([[ 0.0000, 0.0000, 0.0000],
[-0.7895, -0.7089, -0.0364],
[ 0.6778, 0.5803, 0.2678]], requires_grad=True)
>>> with torch.no_grad():
... embedding.weight[padding_idx] = torch.ones(3)
>>> embedding.weight
Parameter containing:
tensor([[ 1.0000, 1.0000, 1.0000],
[-0.7895, -0.7089, -0.0364],
[ 0.6778, 0.5803, 0.2678]], requires_grad=True)
"""
__constants__ = ['num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm',
'norm_type', 'scale_grad_by_freq', 'sparse']
num_embeddings: int
embedding_dim: int
padding_idx: Optional[int]
max_norm: Optional[float]
norm_type: float
scale_grad_by_freq: bool
weight: Tensor
sparse: bool
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
sparse: bool = False, _weight: Optional[Tensor] = None,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(Embedding, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if _weight is None:
self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs))
self.reset_parameters()
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim], \
'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = Parameter(_weight)
self.sparse = sparse
def reset_parameters(self) -> None:
init.normal_(self.weight)
self._fill_padding_idx_with_zero()
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input: Tensor) -> Tensor:
return F.embedding(
input, self.weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
def extra_repr(self) -> str:
s = '{num_embeddings}, {embedding_dim}'
if self.padding_idx is not None:
s += ', padding_idx={padding_idx}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
if self.sparse is not False:
s += ', sparse=True'
return s.format(**self.__dict__)
@classmethod
def from_pretrained(cls, embeddings, freeze=True, padding_idx=None,
max_norm=None, norm_type=2., scale_grad_by_freq=False,
sparse=False):
r"""Creates Embedding instance from given 2-dimensional FloatTensor.
Args:
embeddings (Tensor): FloatTensor containing weights for the Embedding.
First dimension is being passed to Embedding as ``num_embeddings``, second as ``embedding_dim``.
freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process.
Equivalent to ``embedding.weight.requires_grad = False``. Default: ``True``
padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
i.e. it remains as a fixed "pad".
max_norm (float, optional): See module initialization documentation.
norm_type (float, optional): See module initialization documentation. Default ``2``.
scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
sparse (bool, optional): See module initialization documentation.
Examples::
>>> # FloatTensor containing pretrained weights
>>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
>>> embedding = nn.Embedding.from_pretrained(weight)
>>> # Get embeddings for index 1
>>> input = torch.LongTensor([1])
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> embedding(input)
tensor([[ 4.0000, 5.1000, 6.3000]])
"""
assert embeddings.dim() == 2, \
'Embeddings parameter is expected to be 2-dimensional'
rows, cols = embeddings.shape
embedding = cls(
num_embeddings=rows,
embedding_dim=cols,
_weight=embeddings,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse)
embedding.weight.requires_grad = not freeze
return embedding
class EmbeddingBag(Module):
r"""Computes sums or means of 'bags' of embeddings, without instantiating the
intermediate embeddings.
For bags of constant length, no :attr:`per_sample_weights`, no indices equal to :attr:`padding_idx`,
and with 2D inputs, this class
* with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=1)``,
* with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=1)``,
* with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=1)``.
However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
operations.
EmbeddingBag also supports per-sample weights as an argument to the forward
pass. This scales the output of the Embedding before performing a weighted
reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
:attr:`per_sample_weights`.
Args:
num_embeddings (int): size of the dictionary of embeddings
embedding_dim (int): the size of each embedding vector
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
Note: this option is not supported when ``mode="max"``.
mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
into consideration. ``"mean"`` computes the average of the values
in the bag, ``"max"`` computes the max value over each bag.
Default: ``"mean"``
sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
Notes for more details regarding sparse gradients. Note: this option is not
supported when ``mode="max"``.
include_last_offset (bool, optional): if ``True``, :attr:`offsets` has one additional element, where the last element
is equivalent to the size of `indices`. This matches the CSR format.
padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the
gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated
during training, i.e. it remains as a fixed "pad". For a newly constructed
EmbeddingBag, the embedding vector at :attr:`padding_idx` will default to all
zeros, but can be updated to another value to be used as the padding vector.
Note that the embedding vector at :attr:`padding_idx` is excluded from the
reduction.
Attributes:
weight (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`
initialized from :math:`\mathcal{N}(0, 1)`.
Examples::
>>> # an EmbeddingBag module containing 10 tensors of size 3
>>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
>>> # a batch of 2 samples of 4 indices each
>>> input = torch.tensor([1,2,4,5,4,3,2,9], dtype=torch.long)
>>> offsets = torch.tensor([0,4], dtype=torch.long)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> embedding_sum(input, offsets)
tensor([[-0.8861, -5.4350, -0.0523],
[ 1.1306, -2.5798, -1.0044]])
>>> # Example with padding_idx
>>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum', padding_idx=2)
>>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9], dtype=torch.long)
>>> offsets = torch.tensor([0,4], dtype=torch.long)
>>> embedding_sum(input, offsets)
tensor([[ 0.0000, 0.0000, 0.0000],
[-0.7082, 3.2145, -2.6251]])
>>> # An EmbeddingBag can be loaded from an Embedding like so
>>> embedding = nn.Embedding(10, 3, padding_idx=2)
>>> embedding_sum = nn.EmbeddingBag.from_pretrained(
        ...     embedding.weight,
        ...     padding_idx=embedding.padding_idx,
        ...     mode='sum')
"""
__constants__ = ['num_embeddings', 'embedding_dim', 'max_norm', 'norm_type',
'scale_grad_by_freq', 'mode', 'sparse', 'include_last_offset',
'padding_idx']
num_embeddings: int
embedding_dim: int
max_norm: Optional[float]
norm_type: float
scale_grad_by_freq: bool
weight: Tensor
mode: str
sparse: bool
include_last_offset: bool
padding_idx: Optional[int]
def __init__(self, num_embeddings: int, embedding_dim: int,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
mode: str = 'mean', sparse: bool = False, _weight: Optional[Tensor] = None,
include_last_offset: bool = False, padding_idx: Optional[int] = None,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(EmbeddingBag, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
if _weight is None:
self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs))
self.reset_parameters()
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim], \
'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = Parameter(_weight)
self.mode = mode
self.sparse = sparse
self.include_last_offset = include_last_offset
def reset_parameters(self) -> None:
init.normal_(self.weight)
self._fill_padding_idx_with_zero()
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None) -> Tensor:
"""Forward pass of EmbeddingBag.
Args:
input (Tensor): Tensor containing bags of indices into the embedding matrix.
offsets (Tensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines
the starting index position of each bag (sequence) in :attr:`input`.
per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
must have exactly the same shape as input and is treated as having the same
:attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
Returns:
            Tensor of shape `(B, embedding_dim)` containing the per-bag aggregated embeddings.
.. note::
A few notes about ``input`` and ``offsets``:
- :attr:`input` and :attr:`offsets` have to be of the same type, either int or long
- If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)
each of fixed length ``N``, and this will return ``B`` values aggregated in a way
depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case.
- If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of
multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing the
starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets` of shape `(B)`,
              :attr:`input` will be viewed as having ``B`` bags. Empty bags (i.e., those of length 0) will have
              their returned vectors filled with zeros.
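        Example (an illustrative sketch; the 2D form corresponds to the 1D-plus-offsets form above)::
            >>> # xdoctest: +SKIP
            >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
            >>> flat = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
            >>> offsets = torch.tensor([0, 4], dtype=torch.long)
            >>> out_1d = embedding_sum(flat, offsets)    # two bags of four indices each
            >>> out_2d = embedding_sum(flat.view(2, 4))  # the same bags, given as rows
            >>> torch.allclose(out_1d, out_2d)
            True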
"""
return F.embedding_bag(input, self.weight, offsets,
self.max_norm, self.norm_type,
self.scale_grad_by_freq, self.mode, self.sparse,
per_sample_weights, self.include_last_offset,
self.padding_idx)
def extra_repr(self) -> str:
s = '{num_embeddings}, {embedding_dim}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
s += ', mode={mode}'
if self.padding_idx is not None:
s += ', padding_idx={padding_idx}'
return s.format(**self.__dict__)
@classmethod
def from_pretrained(cls, embeddings: Tensor, freeze: bool = True, max_norm: Optional[float] = None,
norm_type: float = 2., scale_grad_by_freq: bool = False,
mode: str = 'mean', sparse: bool = False, include_last_offset: bool = False,
padding_idx: Optional[int] = None) -> 'EmbeddingBag':
r"""Creates EmbeddingBag instance from given 2-dimensional FloatTensor.
Args:
embeddings (Tensor): FloatTensor containing weights for the EmbeddingBag.
First dimension is being passed to EmbeddingBag as 'num_embeddings', second as 'embedding_dim'.
freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process.
Equivalent to ``embeddingbag.weight.requires_grad = False``. Default: ``True``
max_norm (float, optional): See module initialization documentation. Default: ``None``
norm_type (float, optional): See module initialization documentation. Default ``2``.
scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
mode (str, optional): See module initialization documentation. Default: ``"mean"``
sparse (bool, optional): See module initialization documentation. Default: ``False``.
include_last_offset (bool, optional): See module initialization documentation. Default: ``False``.
padding_idx (int, optional): See module initialization documentation. Default: ``None``.
Examples::
>>> # FloatTensor containing pretrained weights
>>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
>>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight)
>>> # Get embeddings for index 1
>>> input = torch.LongTensor([[1, 0]])
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> embeddingbag(input)
tensor([[ 2.5000, 3.7000, 4.6500]])
"""
assert embeddings.dim() == 2, \
'Embeddings parameter is expected to be 2-dimensional'
rows, cols = embeddings.shape
embeddingbag = cls(
num_embeddings=rows,
embedding_dim=cols,
_weight=embeddings,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
mode=mode,
sparse=sparse,
include_last_offset=include_last_offset,
padding_idx=padding_idx)
embeddingbag.weight.requires_grad = not freeze
return embeddingbag
| pytorch-master | torch/nn/modules/sparse.py |
from collections import OrderedDict, namedtuple
import itertools
import warnings
import functools
import weakref
import torch
from ..parameter import Parameter
import torch.utils.hooks as hooks
from torch import Tensor, device, dtype
from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, List
from ...utils.hooks import RemovableHandle
__all__ = ['register_module_forward_pre_hook', 'register_module_forward_hook', 'register_module_backward_hook',
'register_module_full_backward_hook', 'Module']
_grad_t = Union[Tuple[Tensor, ...], Tensor]
# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use
# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be
# the type of the subclass, not the looser type of `Module`.
T = TypeVar('T', bound='Module')
class _IncompatibleKeys(namedtuple('IncompatibleKeys', ['missing_keys', 'unexpected_keys'])):
def __repr__(self):
if not self.missing_keys and not self.unexpected_keys:
return '<All keys matched successfully>'
return super(_IncompatibleKeys, self).__repr__()
__str__ = __repr__
def _addindent(s_, numSpaces):
s = s_.split('\n')
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
return s
class _WrappedHook:
def __init__(self, hook: Callable, module: Optional["Module"] = None):
self.hook: Callable = hook
functools.update_wrapper(self, hook)
self.with_module: bool = False
if module is not None:
self.module: weakref.ReferenceType["Module"] = weakref.ref(module)
self.with_module = True
def __call__(self, *args: Any, **kwargs: Any) -> Any:
if self.with_module:
module = self.module()
if module is None:
raise RuntimeError("You are trying to call the hook of a dead Module!")
return self.hook(module, *args, **kwargs)
return self.hook(*args, **kwargs)
def __getstate__(self) -> Dict:
result = {"hook": self.hook, "with_module": self.with_module}
if self.with_module:
result["module"] = self.module()
return result
def __setstate__(self, state: Dict):
self.hook = state["hook"]
self.with_module = state["with_module"]
if self.with_module:
if state["module"] is None:
raise RuntimeError("You are trying to revive the hook of a dead Module!")
self.module = weakref.ref(state["module"])
r"""This tracks hooks common to all modules that are executed before/after
calling forward and backward. This is global state used for debugging/profiling
purposes"""
_global_backward_hooks: Dict[int, Callable] = OrderedDict()
_global_is_full_backward_hook: Optional[bool] = None
_global_forward_pre_hooks: Dict[int, Callable] = OrderedDict()
_global_forward_hooks: Dict[int, Callable] = OrderedDict()
_EXTRA_STATE_KEY_SUFFIX = '_extra_state'
def register_module_forward_pre_hook(hook: Callable[..., None]) -> RemovableHandle:
r"""Registers a forward pre-hook common to all modules.
.. warning ::
This adds global state to the `nn.module` module
and it is only intended for debugging/profiling purposes.
The hook will be called every time before :func:`forward` is invoked.
It should have the following signature::
hook(module, input) -> None or modified input
    The input contains only the positional arguments given to the module.
    Keyword arguments won't be passed to the hooks and only to the ``forward``.
    The hook can modify the input. The user can either return a tuple or a
    single modified value from the hook. The value will be wrapped into a tuple
    if a single value is returned (unless that value is already a tuple).
This hook has precedence over the specific module hooks registered with
``register_forward_pre_hook``.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
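    Example (an illustrative debugging hook; ``log_inputs`` is just a name chosen here)::
        >>> # xdoctest: +SKIP
        >>> def log_inputs(module, input):
        ...     print(f"{type(module).__name__} called with {len(input)} positional input(s)")
        >>> handle = register_module_forward_pre_hook(log_inputs)
        >>> # ... run some modules ...
        >>> handle.remove()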
"""
handle = hooks.RemovableHandle(_global_forward_pre_hooks)
_global_forward_pre_hooks[handle.id] = hook
return handle
def register_module_forward_hook(hook: Callable[..., None]) -> RemovableHandle:
r"""Registers a global forward hook for all the modules
.. warning ::
This adds global state to the `nn.module` module
and it is only intended for debugging/profiling purposes.
The hook will be called every time after :func:`forward` has computed an output.
It should have the following signature::
hook(module, input, output) -> None or modified output
The input contains only the positional arguments given to the module.
Keyword arguments won't be passed to the hooks and only to the ``forward``.
    The hook can modify the output. It can also modify the input in-place, but
    this will have no effect on the forward pass, since the hook is called after
    :func:`forward` has already run.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
This hook will be executed before specific module hooks registered with
``register_forward_hook``.
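    Example (an illustrative hook that only observes outputs; returning ``None`` leaves the
    output unchanged)::
        >>> # xdoctest: +SKIP
        >>> def record_output_shape(module, input, output):
        ...     if isinstance(output, torch.Tensor):
        ...         print(type(module).__name__, tuple(output.shape))
        >>> handle = register_module_forward_hook(record_output_shape)
        >>> # ... run some modules ...
        >>> handle.remove()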
"""
handle = hooks.RemovableHandle(_global_forward_hooks)
_global_forward_hooks[handle.id] = hook
return handle
def register_module_backward_hook(
hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]
) -> RemovableHandle:
r"""Registers a backward hook common to all the modules.
This function is deprecated in favor of
:func:`torch.nn.modules.module.register_module_full_backward_hook`
and the behavior of this function will change in future versions.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
global _global_is_full_backward_hook
if _global_is_full_backward_hook is True:
raise RuntimeError("Cannot use both regular backward hooks and full backward hooks as a "
"global Module hook. Please use only one of them.")
_global_is_full_backward_hook = False
handle = hooks.RemovableHandle(_global_backward_hooks)
_global_backward_hooks[handle.id] = hook
return handle
def register_module_full_backward_hook(
hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]
) -> RemovableHandle:
r"""Registers a backward hook common to all the modules.
.. warning ::
This adds global state to the `nn.module` module
and it is only intended for debugging/profiling purposes.
The hook will be called every time the gradients with respect to module
inputs are computed. The hook should have the following signature::
hook(module, grad_input, grad_output) -> Tensor or None
The :attr:`grad_input` and :attr:`grad_output` are tuples. The hook should
not modify its arguments, but it can optionally return a new gradient with
respect to the input that will be used in place of :attr:`grad_input` in
subsequent computations. :attr:`grad_input` will only correspond to the inputs given
as positional arguments and all kwarg arguments will not appear in the hook. Entries
in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor
arguments.
For technical reasons, when this hook is applied to a Module, its forward function will
receive a view of each Tensor passed to the Module. Similarly the caller will receive a view
of each Tensor returned by the Module's forward function.
Global hooks are called before hooks registered with `register_backward_hook`
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
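    Example (an illustrative hook that reports gradient norms; returning ``None`` leaves the
    gradients unchanged)::
        >>> # xdoctest: +SKIP
        >>> def report_grad_norms(module, grad_input, grad_output):
        ...     norms = [g.norm().item() for g in grad_output if g is not None]
        ...     print(type(module).__name__, norms)
        >>> handle = register_module_full_backward_hook(report_grad_norms)
        >>> # ... run forward and backward ...
        >>> handle.remove()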
"""
global _global_is_full_backward_hook
if _global_is_full_backward_hook is False:
raise RuntimeError("Cannot use both regular backward hooks and full backward hooks as a "
"global Module hook. Please use only one of them.")
_global_is_full_backward_hook = True
handle = hooks.RemovableHandle(_global_backward_hooks)
_global_backward_hooks[handle.id] = hook
return handle
# Trick mypy into not applying contravariance rules to inputs by defining
# forward as a value, rather than a function. See also
# https://github.com/python/mypy/issues/8795
def _forward_unimplemented(self, *input: Any) -> None:
r"""Defines the computation performed at every call.
Should be overridden by all subclasses.
.. note::
Although the recipe for forward pass needs to be defined within
this function, one should call the :class:`Module` instance afterwards
instead of this since the former takes care of running the
registered hooks while the latter silently ignores them.
"""
raise NotImplementedError(f"Module [{type(self).__name__}] is missing the required \"forward\" function")
class Module:
r"""Base class for all neural network modules.
Your models should also subclass this class.
    Modules can also contain other Modules, allowing them to be nested in
    a tree structure. You can assign the submodules as regular attributes::
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
Submodules assigned in this way will be registered, and will have their
parameters converted too when you call :meth:`to`, etc.
.. note::
As per the example above, an ``__init__()`` call to the parent class
must be made before assignment on the child.
:ivar training: Boolean represents whether this module is in training or
evaluation mode.
:vartype training: bool
"""
dump_patches: bool = False
_version: int = 1
r"""This allows better BC support for :meth:`load_state_dict`. In
    :meth:`state_dict`, the version number will be saved in the attribute
    `_metadata` of the returned state dict, and thus pickled. `_metadata` is a
dictionary with keys that follow the naming convention of state dict. See
``_load_from_state_dict`` on how to use this information in loading.
If new parameters/buffers are added/removed from a module, this number shall
be bumped, and the module's `_load_from_state_dict` method can compare the
version number and do appropriate changes if the state dict is from before
the change."""
training: bool
_parameters: Dict[str, Optional[Parameter]]
_buffers: Dict[str, Optional[Tensor]]
_non_persistent_buffers_set: Set[str]
_backward_hooks: Dict[int, Callable]
_is_full_backward_hook: Optional[bool]
_forward_hooks: Dict[int, Callable]
_forward_pre_hooks: Dict[int, Callable]
_state_dict_hooks: Dict[int, Callable]
_load_state_dict_pre_hooks: Dict[int, Callable]
_load_state_dict_post_hooks: Dict[int, Callable]
_modules: Dict[str, Optional['Module']]
def __init__(self) -> None:
"""
Initializes internal Module state, shared by both nn.Module and ScriptModule.
"""
torch._C._log_api_usage_once("python.nn_module")
"""
Calls super().__setattr__('a', a) instead of the typical self.a = a
to avoid Module.__setattr__ overhead. Module's __setattr__ has special
handling for parameters, submodules, and buffers but simply calls into
super().__setattr__ for all other attributes.
"""
super().__setattr__('training', True)
super().__setattr__('_parameters', OrderedDict())
super().__setattr__('_buffers', OrderedDict())
super().__setattr__('_non_persistent_buffers_set', set())
super().__setattr__('_backward_hooks', OrderedDict())
super().__setattr__('_is_full_backward_hook', None)
super().__setattr__('_forward_hooks', OrderedDict())
super().__setattr__('_forward_pre_hooks', OrderedDict())
super().__setattr__('_state_dict_hooks', OrderedDict())
super().__setattr__('_load_state_dict_pre_hooks', OrderedDict())
super().__setattr__('_load_state_dict_post_hooks', OrderedDict())
super().__setattr__('_modules', OrderedDict())
forward: Callable[..., Any] = _forward_unimplemented
def register_buffer(self, name: str, tensor: Optional[Tensor], persistent: bool = True) -> None:
r"""Adds a buffer to the module.
        This is typically used to register a buffer that should not be
        considered a model parameter. For example, BatchNorm's ``running_mean``
is not a parameter, but is part of the module's state. Buffers, by
default, are persistent and will be saved alongside parameters. This
behavior can be changed by setting :attr:`persistent` to ``False``. The
only difference between a persistent buffer and a non-persistent buffer
is that the latter will not be a part of this module's
:attr:`state_dict`.
Buffers can be accessed as attributes using given names.
Args:
name (str): name of the buffer. The buffer can be accessed
from this module using the given name
tensor (Tensor or None): buffer to be registered. If ``None``, then operations
that run on buffers, such as :attr:`cuda`, are ignored. If ``None``,
the buffer is **not** included in the module's :attr:`state_dict`.
persistent (bool): whether the buffer is part of this module's
:attr:`state_dict`.
Example::
>>> # xdoctest: +SKIP("undefined vars")
>>> self.register_buffer('running_mean', torch.zeros(num_features))
"""
if persistent is False and isinstance(self, torch.jit.ScriptModule):
raise RuntimeError("ScriptModule does not support non-persistent buffers")
if '_buffers' not in self.__dict__:
raise AttributeError(
"cannot assign buffer before Module.__init__() call")
elif not isinstance(name, torch._six.string_classes):
raise TypeError("buffer name should be a string. "
"Got {}".format(torch.typename(name)))
elif '.' in name:
raise KeyError("buffer name can't contain \".\"")
elif name == '':
raise KeyError("buffer name can't be empty string \"\"")
elif hasattr(self, name) and name not in self._buffers:
raise KeyError("attribute '{}' already exists".format(name))
elif tensor is not None and not isinstance(tensor, torch.Tensor):
raise TypeError("cannot assign '{}' object to buffer '{}' "
"(torch Tensor or None required)"
.format(torch.typename(tensor), name))
else:
self._buffers[name] = tensor
if persistent:
self._non_persistent_buffers_set.discard(name)
else:
self._non_persistent_buffers_set.add(name)
def register_parameter(self, name: str, param: Optional[Parameter]) -> None:
r"""Adds a parameter to the module.
The parameter can be accessed as an attribute using given name.
Args:
name (str): name of the parameter. The parameter can be accessed
from this module using the given name
param (Parameter or None): parameter to be added to the module. If
``None``, then operations that run on parameters, such as :attr:`cuda`,
are ignored. If ``None``, the parameter is **not** included in the
module's :attr:`state_dict`.
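Example (an illustrative sketch; ``num_features`` is assumed to be defined
by the enclosing module)::
>>> # xdoctest: +SKIP("undefined vars")
>>> self.register_parameter('scale', nn.Parameter(torch.ones(num_features)))
>>> self.scale  # the parameter is now accessible as an attribute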
"""
if '_parameters' not in self.__dict__:
raise AttributeError(
"cannot assign parameter before Module.__init__() call")
elif not isinstance(name, torch._six.string_classes):
raise TypeError("parameter name should be a string. "
"Got {}".format(torch.typename(name)))
elif '.' in name:
raise KeyError("parameter name can't contain \".\"")
elif name == '':
raise KeyError("parameter name can't be empty string \"\"")
elif hasattr(self, name) and name not in self._parameters:
raise KeyError("attribute '{}' already exists".format(name))
if param is None:
self._parameters[name] = None
elif not isinstance(param, Parameter):
raise TypeError("cannot assign '{}' object to parameter '{}' "
"(torch.nn.Parameter or None required)"
.format(torch.typename(param), name))
elif param.grad_fn:
raise ValueError(
"Cannot assign non-leaf Tensor to parameter '{0}'. Model "
"parameters must be created explicitly. To express '{0}' "
"as a function of another Tensor, compute the value in "
"the forward() method.".format(name))
else:
self._parameters[name] = param
def add_module(self, name: str, module: Optional['Module']) -> None:
r"""Adds a child module to the current module.
The module can be accessed as an attribute using the given name.
Args:
name (str): name of the child module. The child module can be
accessed from this module using the given name
module (Module): child module to be added to the module.
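Example (an illustrative sketch inside a custom module's ``__init__``)::
>>> # xdoctest: +SKIP("undefined vars")
>>> self.add_module('conv1', nn.Conv2d(3, 16, kernel_size=3))
>>> self.conv1  # the child module is now accessible as an attribute
Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1))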
"""
if not isinstance(module, Module) and module is not None:
raise TypeError("{} is not a Module subclass".format(
torch.typename(module)))
elif not isinstance(name, torch._six.string_classes):
raise TypeError("module name should be a string. Got {}".format(
torch.typename(name)))
elif hasattr(self, name) and name not in self._modules:
raise KeyError("attribute '{}' already exists".format(name))
elif '.' in name:
raise KeyError("module name can't contain \".\", got: {}".format(name))
elif name == '':
raise KeyError("module name can't be empty string \"\"")
self._modules[name] = module
def register_module(self, name: str, module: Optional['Module']) -> None:
r"""Alias for :func:`add_module`."""
self.add_module(name, module)
def get_submodule(self, target: str) -> "Module":
"""
Returns the submodule given by ``target`` if it exists,
otherwise throws an error.
For example, let's say you have an ``nn.Module`` ``A`` that
looks like this:
.. code-block:: text
A(
(net_b): Module(
(net_c): Module(
(conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
)
(linear): Linear(in_features=100, out_features=200, bias=True)
)
)
(The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested
submodule ``net_b``, which itself has two submodules ``net_c``
and ``linear``. ``net_c`` then has a submodule ``conv``.)
To check whether or not we have the ``linear`` submodule, we
would call ``get_submodule("net_b.linear")``. To check whether
we have the ``conv`` submodule, we would call
``get_submodule("net_b.net_c.conv")``.
The runtime of ``get_submodule`` is bounded by the degree
of module nesting in ``target``. A query against
``named_modules`` achieves the same result, but it is O(N) in
the number of transitive modules. So, for a simple check to see
if some submodule exists, ``get_submodule`` should always be
used.
Args:
target: The fully-qualified string name of the submodule
to look for. (See above example for how to specify a
fully-qualified string.)
Returns:
torch.nn.Module: The submodule referenced by ``target``
Raises:
AttributeError: If the target string references an invalid
path or resolves to something that is not an
``nn.Module``
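Example (assuming ``A`` is an instance laid out as in the diagram above)::
>>> # xdoctest: +SKIP("undefined vars")
>>> A.get_submodule("net_b.net_c.conv")
Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))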
"""
if target == "":
return self
atoms: List[str] = target.split(".")
mod: torch.nn.Module = self
for item in atoms:
if not hasattr(mod, item):
raise AttributeError(mod._get_name() + " has no "
"attribute `" + item + "`")
mod = getattr(mod, item)
if not isinstance(mod, torch.nn.Module):
raise AttributeError("`" + item + "` is not "
"an nn.Module")
return mod
def get_parameter(self, target: str) -> "Parameter":
"""
Returns the parameter given by ``target`` if it exists,
otherwise throws an error.
See the docstring for ``get_submodule`` for a more detailed
explanation of this method's functionality as well as how to
correctly specify ``target``.
Args:
target: The fully-qualified string name of the Parameter
to look for. (See ``get_submodule`` for how to specify a
fully-qualified string.)
Returns:
torch.nn.Parameter: The Parameter referenced by ``target``
Raises:
AttributeError: If the target string references an invalid
path or resolves to something that is not an
``nn.Parameter``
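Example (continuing the illustrative module ``A`` from ``get_submodule``)::
>>> # xdoctest: +SKIP("undefined vars")
>>> weight = A.get_parameter("net_b.linear.weight")
>>> weight.shape
torch.Size([200, 100])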
"""
module_path, _, param_name = target.rpartition(".")
mod: torch.nn.Module = self.get_submodule(module_path)
if not hasattr(mod, param_name):
raise AttributeError(mod._get_name() + " has no attribute `"
+ param_name + "`")
param: torch.nn.Parameter = getattr(mod, param_name)
if not isinstance(param, torch.nn.Parameter):
raise AttributeError("`" + param_name + "` is not an "
"nn.Parameter")
return param
def get_buffer(self, target: str) -> "Tensor":
"""
Returns the buffer given by ``target`` if it exists,
otherwise throws an error.
See the docstring for ``get_submodule`` for a more detailed
explanation of this method's functionality as well as how to
correctly specify ``target``.
Args:
target: The fully-qualified string name of the buffer
to look for. (See ``get_submodule`` for how to specify a
fully-qualified string.)
Returns:
torch.Tensor: The buffer referenced by ``target``
Raises:
AttributeError: If the target string references an invalid
path or resolves to something that is not a
buffer
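Example (an illustrative sketch; ``bn1`` is a hypothetical child module with
a registered ``running_mean`` buffer)::
>>> # xdoctest: +SKIP("undefined vars")
>>> model.get_buffer("bn1.running_mean")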
"""
module_path, _, buffer_name = target.rpartition(".")
mod: torch.nn.Module = self.get_submodule(module_path)
if not hasattr(mod, buffer_name):
raise AttributeError(mod._get_name() + " has no attribute `"
+ buffer_name + "`")
buffer: torch.Tensor = getattr(mod, buffer_name)
if buffer_name not in mod._buffers:
raise AttributeError("`" + buffer_name + "` is not a buffer")
return buffer
def get_extra_state(self) -> Any:
"""
Returns any extra state to include in the module's state_dict.
Implement this and a corresponding :func:`set_extra_state` for your module
if you need to store extra state. This function is called when building the
module's `state_dict()`.
Note that extra state should be pickleable to ensure working serialization
of the state_dict. We only provide backwards compatibility guarantees
for serializing Tensors; other objects may break backwards compatibility if
their serialized pickled form changes.
Returns:
object: Any extra state to store in the module's state_dict
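A minimal sketch of a matching pair of overrides (the ``forward_calls``
attribute is purely illustrative)::
>>> # xdoctest: +SKIP("illustrative override")
>>> class CountingModule(nn.Module):
...     def get_extra_state(self):
...         return {"forward_calls": self.forward_calls}
...     def set_extra_state(self, state):
...         self.forward_calls = state["forward_calls"]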
"""
raise RuntimeError(
"Reached a code path in Module.get_extra_state() that should never be called. "
"Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml "
"to report this bug.")
def set_extra_state(self, state: Any):
"""
This function is called from :func:`load_state_dict` to handle any extra state
found within the `state_dict`. Implement this function and a corresponding
:func:`get_extra_state` for your module if you need to store extra state within its
`state_dict`.
Args:
state (dict): Extra state from the `state_dict`
"""
raise RuntimeError(
"Reached a code path in Module.set_extra_state() that should never be called. "
"Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml "
"to report this bug.")
def _apply(self, fn):
for module in self.children():
module._apply(fn)
def compute_should_use_set_data(tensor, tensor_applied):
if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
# If the new tensor has compatible tensor type as the existing tensor,
# the current behavior is to change the tensor in-place using `.data =`,
# and the future behavior is to overwrite the existing tensor. However,
# changing the current behavior is a BC-breaking change, and we want it
# to happen in future releases. So for now we introduce the
# `torch.__future__.get_overwrite_module_params_on_conversion()`
# global flag to let the user control whether they want the future
# behavior of overwriting the existing tensor or not.
return not torch.__future__.get_overwrite_module_params_on_conversion()
else:
return False
for key, param in self._parameters.items():
if param is None:
continue
# Tensors stored in modules are graph leaves, and we don't want to
# track autograd history of `param_applied`, so we have to use
# `with torch.no_grad():`
with torch.no_grad():
param_applied = fn(param)
should_use_set_data = compute_should_use_set_data(param, param_applied)
if should_use_set_data:
param.data = param_applied
out_param = param
else:
assert isinstance(param, Parameter)
assert param.is_leaf
out_param = Parameter(param_applied, param.requires_grad)
self._parameters[key] = out_param
if param.grad is not None:
with torch.no_grad():
grad_applied = fn(param.grad)
should_use_set_data = compute_should_use_set_data(param.grad, grad_applied)
if should_use_set_data:
assert out_param.grad is not None
out_param.grad.data = grad_applied
else:
assert param.grad.is_leaf
out_param.grad = grad_applied.requires_grad_(param.grad.requires_grad)
for key, buf in self._buffers.items():
if buf is not None:
self._buffers[key] = fn(buf)
return self
def apply(self: T, fn: Callable[['Module'], None]) -> T:
r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)
as well as self. Typical use includes initializing the parameters of a model
(see also :ref:`nn-init-doc`).
Args:
fn (:class:`Module` -> None): function to be applied to each submodule
Returns:
Module: self
Example::
>>> @torch.no_grad()
>>> def init_weights(m):
>>> print(m)
>>> if type(m) == nn.Linear:
>>> m.weight.fill_(1.0)
>>> print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[1., 1.],
[1., 1.]], requires_grad=True)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[1., 1.],
[1., 1.]], requires_grad=True)
Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=2, bias=True)
)
"""
for module in self.children():
module.apply(fn)
fn(self)
return self
def cuda(self: T, device: Optional[Union[int, device]] = None) -> T:
r"""Moves all model parameters and buffers to the GPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on GPU while being optimized.
.. note::
This method modifies the module in-place.
Args:
device (int, optional): if specified, all parameters will be
copied to that device
Returns:
Module: self
"""
return self._apply(lambda t: t.cuda(device))
def ipu(self: T, device: Optional[Union[int, device]] = None) -> T:
r"""Moves all model parameters and buffers to the IPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on IPU while being optimized.
.. note::
This method modifies the module in-place.
Arguments:
device (int, optional): if specified, all parameters will be
copied to that device
Returns:
Module: self
"""
return self._apply(lambda t: t.ipu(device))
def xpu(self: T, device: Optional[Union[int, device]] = None) -> T:
r"""Moves all model parameters and buffers to the XPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on XPU while being optimized.
.. note::
This method modifies the module in-place.
Arguments:
device (int, optional): if specified, all parameters will be
copied to that device
Returns:
Module: self
"""
return self._apply(lambda t: t.xpu(device))
def cpu(self: T) -> T:
r"""Moves all model parameters and buffers to the CPU.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.cpu())
def type(self: T, dst_type: Union[dtype, str]) -> T:
r"""Casts all parameters and buffers to :attr:`dst_type`.
.. note::
This method modifies the module in-place.
Args:
dst_type (type or string): the desired type
Returns:
Module: self
"""
return self._apply(lambda t: t.type(dst_type))
def float(self: T) -> T:
r"""Casts all floating point parameters and buffers to ``float`` datatype.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.float() if t.is_floating_point() else t)
def double(self: T) -> T:
r"""Casts all floating point parameters and buffers to ``double`` datatype.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.double() if t.is_floating_point() else t)
def half(self: T) -> T:
r"""Casts all floating point parameters and buffers to ``half`` datatype.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.half() if t.is_floating_point() else t)
def bfloat16(self: T) -> T:
r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype.
.. note::
This method modifies the module in-place.
Returns:
Module: self
"""
return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)
def to_empty(self: T, *, device: Union[str, device]) -> T:
r"""Moves the parameters and buffers to the specified device without copying storage.
Args:
device (:class:`torch.device`): The desired device of the parameters
and buffers in this module.
Returns:
Module: self
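Example (a minimal sketch; typically paired with meta-device construction)::
>>> # xdoctest: +SKIP("undefined vars")
>>> m = nn.Linear(4, 4, device="meta")
>>> m = m.to_empty(device="cpu")  # parameters now have real, uninitialized storage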
"""
return self._apply(lambda t: torch.empty_like(t, device=device))
@overload
def to(self: T, device: Optional[Union[int, device]] = ..., dtype: Optional[Union[dtype, str]] = ...,
non_blocking: bool = ...) -> T:
...
@overload
def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T:
...
@overload
def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T:
...
def to(self, *args, **kwargs):
r"""Moves and/or casts the parameters and buffers.
This can be called as
.. function:: to(device=None, dtype=None, non_blocking=False)
:noindex:
.. function:: to(dtype, non_blocking=False)
:noindex:
.. function:: to(tensor, non_blocking=False)
:noindex:
.. function:: to(memory_format=torch.channels_last)
:noindex:
Its signature is similar to :meth:`torch.Tensor.to`, but only accepts
floating point or complex :attr:`dtype`\ s. In addition, this method will
only cast the floating point or complex parameters and buffers to :attr:`dtype`
(if given). The integral parameters and buffers will be moved to
:attr:`device`, if that is given, but with dtypes unchanged. When
:attr:`non_blocking` is set, it tries to convert/move asynchronously
with respect to the host if possible, e.g., moving CPU Tensors with
pinned memory to CUDA devices.
See below for examples.
.. note::
This method modifies the module in-place.
Args:
device (:class:`torch.device`): the desired device of the parameters
and buffers in this module
dtype (:class:`torch.dtype`): the desired floating point or complex dtype of
the parameters and buffers in this module
tensor (torch.Tensor): Tensor whose dtype and device are the desired
dtype and device for all parameters and buffers in this module
memory_format (:class:`torch.memory_format`): the desired memory
format for 4D parameters and buffers in this module (keyword
only argument)
Returns:
Module: self
Examples::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
[-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
[-0.5113, -0.2325]], dtype=torch.float64)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
[-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
[-0.5112, -0.2324]], dtype=torch.float16)
>>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
>>> linear.weight
Parameter containing:
tensor([[ 0.3741+0.j, 0.2382+0.j],
[ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
>>> linear(torch.ones(3, 2, dtype=torch.cdouble))
tensor([[0.6122+0.j, 0.1150+0.j],
[0.6122+0.j, 0.1150+0.j],
[0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
"""
device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
if dtype is not None:
if not (dtype.is_floating_point or dtype.is_complex):
raise TypeError('nn.Module.to only accepts floating point or complex '
'dtypes, but got desired dtype={}'.format(dtype))
if dtype.is_complex:
warnings.warn(
"Complex modules are a new feature under active development whose design may change, "
"and some modules might not work as expected when using complex tensors as parameters or buffers. "
"Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml "
"if a complex module does not work as expected.")
def convert(t):
if convert_to_format is not None and t.dim() in (4, 5):
return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None,
non_blocking, memory_format=convert_to_format)
return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
return self._apply(convert)
def register_backward_hook(
self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]
) -> RemovableHandle:
r"""Registers a backward hook on the module.
This function is deprecated in favor of :meth:`~torch.nn.Module.register_full_backward_hook` and
the behavior of this function will change in future versions.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
if self._is_full_backward_hook is True:
raise RuntimeError("Cannot use both regular backward hooks and full backward hooks on a "
"single Module. Please use only one of them.")
self._is_full_backward_hook = False
handle = hooks.RemovableHandle(self._backward_hooks)
self._backward_hooks[handle.id] = hook
return handle
def register_full_backward_hook(
self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]
) -> RemovableHandle:
r"""Registers a backward hook on the module.
The hook will be called every time the gradients with respect to module
inputs are computed. The hook should have the following signature::
hook(module, grad_input, grad_output) -> tuple(Tensor) or None
The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients
with respect to the inputs and outputs respectively. The hook should
not modify its arguments, but it can optionally return a new gradient with
respect to the input that will be used in place of :attr:`grad_input` in
subsequent computations. :attr:`grad_input` will only correspond to the inputs given
as positional arguments and all kwarg arguments are ignored. Entries
in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor
arguments.
For technical reasons, when this hook is applied to a Module, its forward function will
receive a view of each Tensor passed to the Module. Similarly the caller will receive a view
of each Tensor returned by the Module's forward function.
.. warning ::
Modifying inputs or outputs inplace is not allowed when using backward hooks and
will raise an error.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
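Example (an illustrative hook that only inspects gradient shapes; ``model``
is assumed to exist)::
>>> # xdoctest: +SKIP("undefined vars")
>>> def report_grads(module, grad_input, grad_output):
...     print([g.shape if g is not None else None for g in grad_output])
>>> handle = model.register_full_backward_hook(report_grads)
>>> # ... run a forward and backward pass, then clean up:
>>> handle.remove()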
"""
if self._is_full_backward_hook is False:
raise RuntimeError("Cannot use both regular backward hooks and full backward hooks on a "
"single Module. Please use only one of them.")
self._is_full_backward_hook = True
handle = hooks.RemovableHandle(self._backward_hooks)
self._backward_hooks[handle.id] = hook
return handle
def _get_backward_hooks(self):
r"""Returns the backward hooks for use in the call function.
It returns two lists, one with the full backward hooks and one with the non-full
backward hooks.
"""
full_backward_hooks: List[Callable] = []
if (_global_is_full_backward_hook is True):
full_backward_hooks += _global_backward_hooks.values()
if (self._is_full_backward_hook is True):
full_backward_hooks += self._backward_hooks.values()
non_full_backward_hooks: List[Callable] = []
if (_global_is_full_backward_hook is False):
non_full_backward_hooks += _global_backward_hooks.values()
if (self._is_full_backward_hook is False):
non_full_backward_hooks += self._backward_hooks.values()
return full_backward_hooks, non_full_backward_hooks
def _maybe_warn_non_full_backward_hook(self, inputs, result, grad_fn):
if not isinstance(result, torch.Tensor):
if not (isinstance(result, tuple) and all([isinstance(r, torch.Tensor) for r in result])):
warnings.warn("Using non-full backward hooks on a Module that does not return a "
"single Tensor or a tuple of Tensors is deprecated and will be removed "
"in future versions. This hook will be missing some of the grad_output. "
"Please use register_full_backward_hook to get the documented behavior.")
return
else:
result = (result,)
if not isinstance(inputs, torch.Tensor):
if not (isinstance(inputs, tuple) and all([isinstance(i, torch.Tensor) for i in inputs])):
warnings.warn("Using non-full backward hooks on a Module that does not take as input a "
"single Tensor or a tuple of Tensors is deprecated and will be removed "
"in future versions. This hook will be missing some of the grad_input. "
"Please use register_full_backward_hook to get the documented behavior.")
return
else:
inputs = (inputs,)
# At this point we are sure that inputs and result are tuple of Tensors
out_grad_fn = {r.grad_fn for r in result if r.grad_fn is not None}
if len(out_grad_fn) == 0 or (len(out_grad_fn) == 1 and grad_fn not in out_grad_fn):
warnings.warn("Using a non-full backward hook when outputs are nested in python data structure "
"is deprecated and will be removed in future versions. This hook will be missing "
"some grad_output.")
elif len(out_grad_fn) > 1:
warnings.warn("Using a non-full backward hook when outputs are generated by different autograd Nodes "
"is deprecated and will be removed in future versions. This hook will be missing "
"some grad_output. Please use register_full_backward_hook to get the documented behavior.")
else:
# At this point the grad_output part of the hook will most likely be correct
inputs_grad_fn = {i.grad_fn for i in inputs if i.grad_fn is not None}
next_functions = {n[0] for n in grad_fn.next_functions}
if inputs_grad_fn != next_functions:
warnings.warn("Using a non-full backward hook when the forward contains multiple autograd Nodes "
"is deprecated and will be removed in future versions. This hook will be missing "
"some grad_input. Please use register_full_backward_hook to get the documented "
"behavior.")
def register_forward_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle:
r"""Registers a forward pre-hook on the module.
The hook will be called every time before :func:`forward` is invoked.
It should have the following signature::
hook(module, input) -> None or modified input
The input contains only the positional arguments given to the module.
Keyword arguments won't be passed to the hooks; they are only passed to the ``forward``.
The hook can modify the input. The user can either return a tuple or a
single modified value in the hook. We will wrap the value into a tuple
if a single value is returned (unless that value is already a tuple).
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
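Example (an illustrative hook; returning a tuple replaces the positional
inputs passed to ``forward``)::
>>> # xdoctest: +SKIP("undefined vars")
>>> def scale_input(module, input):
...     return (input[0] * 2,)
>>> handle = model.register_forward_pre_hook(scale_input)
>>> handle.remove()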
"""
handle = hooks.RemovableHandle(self._forward_pre_hooks)
self._forward_pre_hooks[handle.id] = hook
return handle
def register_forward_hook(self, hook: Callable[..., None]) -> RemovableHandle:
r"""Registers a forward hook on the module.
The hook will be called every time after :func:`forward` has computed an output.
It should have the following signature::
hook(module, input, output) -> None or modified output
The input contains only the positional arguments given to the module.
Keyword arguments won't be passed to the hooks; they are only passed to the ``forward``.
The hook can modify the output. It can modify the input inplace, but
that will have no effect on the forward pass, since this hook is called
after :func:`forward` has already run.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
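Example (an illustrative hook that logs output shapes, assuming the module
returns a single Tensor)::
>>> # xdoctest: +SKIP("undefined vars")
>>> def log_output(module, input, output):
...     print(type(module).__name__, tuple(output.shape))
>>> handle = model.register_forward_hook(log_output)
>>> handle.remove()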
"""
handle = hooks.RemovableHandle(self._forward_hooks)
self._forward_hooks[handle.id] = hook
return handle
def _slow_forward(self, *input, **kwargs):
tracing_state = torch._C._get_tracing_state()
if not tracing_state or isinstance(self.forward, torch._C.ScriptMethod):
return self.forward(*input, **kwargs)
recording_scopes = torch.jit._trace._trace_module_map is not None
if recording_scopes:
# type ignore was added because at this point one knows that
# torch.jit._trace._trace_module_map is not Optional and has type Dict[Any, Any]
name = torch.jit._trace._trace_module_map[self] if self in torch.jit._trace._trace_module_map else None # type: ignore[index, operator] # noqa: B950
if name:
tracing_state.push_scope(name)
else:
recording_scopes = False
try:
result = self.forward(*input, **kwargs)
finally:
if recording_scopes:
tracing_state.pop_scope()
return result
def _call_impl(self, *input, **kwargs):
forward_call = (self._slow_forward if torch._C._get_tracing_state() else self.forward)
# If we don't have any hooks, we want to skip the rest of the logic in
# this function, and just call forward.
if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
or _global_forward_hooks or _global_forward_pre_hooks):
return forward_call(*input, **kwargs)
# Do not call functions when jit is used
full_backward_hooks, non_full_backward_hooks = [], []
if self._backward_hooks or _global_backward_hooks:
full_backward_hooks, non_full_backward_hooks = self._get_backward_hooks()
if _global_forward_pre_hooks or self._forward_pre_hooks:
for hook in (*_global_forward_pre_hooks.values(), *self._forward_pre_hooks.values()):
result = hook(self, input)
if result is not None:
if not isinstance(result, tuple):
result = (result,)
input = result
bw_hook = None
if full_backward_hooks:
bw_hook = hooks.BackwardHook(self, full_backward_hooks)
input = bw_hook.setup_input_hook(input)
result = forward_call(*input, **kwargs)
if _global_forward_hooks or self._forward_hooks:
for hook in (*_global_forward_hooks.values(), *self._forward_hooks.values()):
hook_result = hook(self, input, result)
if hook_result is not None:
result = hook_result
if bw_hook:
result = bw_hook.setup_output_hook(result)
# Handle the non-full backward hooks
if non_full_backward_hooks:
var = result
while not isinstance(var, torch.Tensor):
if isinstance(var, dict):
var = next((v for v in var.values() if isinstance(v, torch.Tensor)))
else:
var = var[0]
grad_fn = var.grad_fn
if grad_fn is not None:
for hook in non_full_backward_hooks:
grad_fn.register_hook(_WrappedHook(hook, self))
self._maybe_warn_non_full_backward_hook(input, result, grad_fn)
return result
__call__ : Callable[..., Any] = _call_impl
def __setstate__(self, state):
self.__dict__.update(state)
# Support loading old checkpoints that don't have the following attrs:
if '_forward_pre_hooks' not in self.__dict__:
self._forward_pre_hooks = OrderedDict()
if '_state_dict_hooks' not in self.__dict__:
self._state_dict_hooks = OrderedDict()
if '_load_state_dict_pre_hooks' not in self.__dict__:
self._load_state_dict_pre_hooks = OrderedDict()
if '_load_state_dict_post_hooks' not in self.__dict__:
self._load_state_dict_post_hooks = OrderedDict()
if '_non_persistent_buffers_set' not in self.__dict__:
self._non_persistent_buffers_set = set()
if '_is_full_backward_hook' not in self.__dict__:
self._is_full_backward_hook = None
def __getattr__(self, name: str) -> Union[Tensor, 'Module']:
if '_parameters' in self.__dict__:
_parameters = self.__dict__['_parameters']
if name in _parameters:
return _parameters[name]
if '_buffers' in self.__dict__:
_buffers = self.__dict__['_buffers']
if name in _buffers:
return _buffers[name]
if '_modules' in self.__dict__:
modules = self.__dict__['_modules']
if name in modules:
return modules[name]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, name))
def __setattr__(self, name: str, value: Union[Tensor, 'Module']) -> None:
def remove_from(*dicts_or_sets):
for d in dicts_or_sets:
if name in d:
if isinstance(d, dict):
del d[name]
else:
d.discard(name)
params = self.__dict__.get('_parameters')
if isinstance(value, Parameter):
if params is None:
raise AttributeError(
"cannot assign parameters before Module.__init__() call")
remove_from(self.__dict__, self._buffers, self._modules, self._non_persistent_buffers_set)
self.register_parameter(name, value)
elif params is not None and name in params:
if value is not None:
raise TypeError("cannot assign '{}' as parameter '{}' "
"(torch.nn.Parameter or None expected)"
.format(torch.typename(value), name))
self.register_parameter(name, value)
else:
modules = self.__dict__.get('_modules')
if isinstance(value, Module):
if modules is None:
raise AttributeError(
"cannot assign module before Module.__init__() call")
remove_from(self.__dict__, self._parameters, self._buffers, self._non_persistent_buffers_set)
modules[name] = value
elif modules is not None and name in modules:
if value is not None:
raise TypeError("cannot assign '{}' as child module '{}' "
"(torch.nn.Module or None expected)"
.format(torch.typename(value), name))
modules[name] = value
else:
buffers = self.__dict__.get('_buffers')
if buffers is not None and name in buffers:
if value is not None and not isinstance(value, torch.Tensor):
raise TypeError("cannot assign '{}' as buffer '{}' "
"(torch.Tensor or None expected)"
.format(torch.typename(value), name))
buffers[name] = value
else:
super().__setattr__(name, value)
def __delattr__(self, name):
if name in self._parameters:
del self._parameters[name]
elif name in self._buffers:
del self._buffers[name]
self._non_persistent_buffers_set.discard(name)
elif name in self._modules:
del self._modules[name]
else:
super().__delattr__(name)
def _register_state_dict_hook(self, hook):
r"""These hooks will be called with arguments: `self`, `state_dict`,
`prefix`, `local_metadata`, after the `state_dict` of `self` is set.
Note that only parameters and buffers of `self` or its children are
guaranteed to exist in `state_dict`. The hooks may modify `state_dict`
inplace or return a new one.
"""
handle = hooks.RemovableHandle(self._state_dict_hooks)
self._state_dict_hooks[handle.id] = hook
return handle
def _save_to_state_dict(self, destination, prefix, keep_vars):
r"""Saves module state to `destination` dictionary, containing a state
of the module, but not its descendants. This is called on every
submodule in :meth:`~torch.nn.Module.state_dict`.
In rare cases, subclasses can achieve class-specific behavior by
overriding this method with custom logic.
Args:
destination (dict): a dict where state will be stored
prefix (str): the prefix for parameters and buffers used in this
module
"""
for name, param in self._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in self._buffers.items():
if buf is not None and name not in self._non_persistent_buffers_set:
destination[prefix + name] = buf if keep_vars else buf.detach()
extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX
if getattr(self.__class__, "get_extra_state", Module.get_extra_state) is not Module.get_extra_state:
destination[extra_state_key] = self.get_extra_state()
# The user can pass an optional arbitrary mappable object to `state_dict`, in which case `state_dict` returns
# that same object. But if they pass nothing, an `OrderedDict` is created and returned.
T_destination = TypeVar('T_destination', bound=Dict[str, Any])
@overload
def state_dict(self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination:
...
@overload
def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) -> Dict[str, Any]:
...
# TODO: Change `*args` to `*` and remove the corresponding warning in docs when BC allows.
# Also remove the arg parsing logic at that point.
def state_dict(self, *args, destination=None, prefix='', keep_vars=False):
r"""Returns a dictionary containing references to the whole state of the module.
Both parameters and persistent buffers (e.g. running averages) are
included. Keys are corresponding parameter and buffer names.
Parameters and buffers set to ``None`` are not included.
.. note::
The returned object is a shallow copy. It contains references
to the module's parameters and buffers.
.. warning::
Currently ``state_dict()`` also accepts positional arguments for
``destination``, ``prefix`` and ``keep_vars`` in order. However,
this is being deprecated and keyword arguments will be enforced in
future releases.
.. warning::
Please avoid the use of argument ``destination`` as it is not
designed for end-users.
Args:
destination (dict, optional): If provided, the state of module will
be updated into the dict and the same object is returned.
Otherwise, an ``OrderedDict`` will be created and returned.
Default: ``None``.
prefix (str, optional): a prefix added to parameter and buffer
names to compose the keys in state_dict. Default: ``''``.
keep_vars (bool, optional): by default the :class:`~torch.Tensor` s
returned in the state dict are detached from autograd. If it's
set to ``True``, detaching will not be performed.
Default: ``False``.
Returns:
dict:
a dictionary containing a whole state of the module
Example::
>>> # xdoctest: +SKIP("undefined vars")
>>> module.state_dict().keys()
['bias', 'weight']
"""
# TODO: Remove `args` and the parsing logic when BC allows.
if len(args) > 0:
if destination is None:
destination = args[0]
if len(args) > 1 and prefix == '':
prefix = args[1]
if len(args) > 2 and keep_vars is False:
keep_vars = args[2]
# DeprecationWarning is ignored by default
warnings.warn(
"Positional args are being deprecated, use kwargs instead. Refer to "
"https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict"
" for details.")
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
local_metadata = dict(version=self._version)
if hasattr(destination, "_metadata"):
destination._metadata[prefix[:-1]] = local_metadata
self._save_to_state_dict(destination, prefix, keep_vars)
for name, module in self._modules.items():
if module is not None:
module.state_dict(destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars)
for hook in self._state_dict_hooks.values():
hook_result = hook(self, destination, prefix, local_metadata)
if hook_result is not None:
destination = hook_result
return destination
def _register_load_state_dict_pre_hook(self, hook, with_module=False):
r"""These hooks will be called with arguments: `state_dict`, `prefix`,
`local_metadata`, `strict`, `missing_keys`, `unexpected_keys`,
`error_msgs`, before loading `state_dict` into `self`. These arguments
are exactly the same as those of `_load_from_state_dict`.
If ``with_module`` is ``True``, then the first argument to the hook is
an instance of the module.
Arguments:
hook (Callable): Callable hook that will be invoked before
loading the state dict.
with_module (bool, optional): Whether or not to pass the module
instance to the hook as the first parameter.
"""
handle = hooks.RemovableHandle(self._load_state_dict_pre_hooks)
self._load_state_dict_pre_hooks[handle.id] = _WrappedHook(hook, self if with_module else None)
return handle
def register_load_state_dict_post_hook(self, hook):
r"""Registers a post hook to be run after module's ``load_state_dict``
is called.
It should have the following signature::
hook(module, incompatible_keys) -> None
The ``module`` argument is the current module that this hook is registered
on, and the ``incompatible_keys`` argument is a ``NamedTuple`` consisting
of attributes ``missing_keys`` and ``unexpected_keys``. ``missing_keys``
is a ``list`` of ``str`` containing the missing keys and
``unexpected_keys`` is a ``list`` of ``str`` containing the unexpected keys.
The given incompatible_keys can be modified inplace if needed.
Note that the checks performed when calling :func:`load_state_dict` with
``strict=True`` are affected by modifications the hook makes to
``missing_keys`` or ``unexpected_keys``, as expected. Additions to either
set of keys will result in an error being thrown when ``strict=True``, and
clearing out both missing and unexpected keys will avoid an error.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
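Example (an illustrative hook that tolerates known missing keys by editing
``missing_keys`` in place)::
>>> # xdoctest: +SKIP("undefined vars")
>>> def ignore_missing_biases(module, incompatible_keys):
...     incompatible_keys.missing_keys[:] = [
...         k for k in incompatible_keys.missing_keys if not k.endswith("bias")]
>>> handle = model.register_load_state_dict_post_hook(ignore_missing_biases)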
"""
handle = hooks.RemovableHandle(self._load_state_dict_post_hooks)
self._load_state_dict_post_hooks[handle.id] = hook
return handle
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
r"""Copies parameters and buffers from :attr:`state_dict` into only
this module, but not its descendants. This is called on every submodule
in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this
module in input :attr:`state_dict` is provided as :attr:`local_metadata`.
For state dicts without metadata, :attr:`local_metadata` is empty.
Subclasses can achieve class-specific backward compatible loading using
the version number at `local_metadata.get("version", None)`.
.. note::
:attr:`state_dict` is not the same object as the input
:attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So
it can be modified.
Args:
state_dict (dict): a dict containing parameters and
persistent buffers.
prefix (str): the prefix for parameters and buffers used in this
module
local_metadata (dict): a dict containing the metadata for this module,
e.g. the ``version`` number saved under ``_metadata`` by :meth:`state_dict`.
strict (bool): whether to strictly enforce that the keys in
:attr:`state_dict` with :attr:`prefix` match the names of
parameters and buffers in this module
missing_keys (list of str): if ``strict=True``, add missing keys to
this list
unexpected_keys (list of str): if ``strict=True``, add unexpected
keys to this list
error_msgs (list of str): error messages should be added to this
list, and will be reported together in
:meth:`~torch.nn.Module.load_state_dict`
"""
for hook in self._load_state_dict_pre_hooks.values():
hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set}
local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items())
local_state = {k: v for k, v in local_name_params if v is not None}
for name, param in local_state.items():
key = prefix + name
if key in state_dict:
input_param = state_dict[key]
if not torch.overrides.is_tensor_like(input_param):
error_msgs.append('While copying the parameter named "{}", '
'expected torch.Tensor or Tensor-like object from checkpoint but '
'received {}'
.format(key, type(input_param)))
continue
# This is used to avoid copying an uninitialized parameter into
# a non-lazy module, since such a module doesn't have the hook to do the checks;
# in that case, accessing the `.shape` attribute would raise an error.
is_param_lazy = torch.nn.parameter.is_lazy(param)
# Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+
if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1:
input_param = input_param[0]
if not is_param_lazy and input_param.shape != param.shape:
# local shape should match the one in checkpoint
error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, '
'the shape in current model is {}.'
.format(key, input_param.shape, param.shape))
continue
try:
with torch.no_grad():
param.copy_(input_param)
except Exception as ex:
error_msgs.append('While copying the parameter named "{}", '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}, '
'an exception occurred : {}.'
.format(key, param.size(), input_param.size(), ex.args))
elif strict:
missing_keys.append(key)
extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX
if getattr(self.__class__, "set_extra_state", Module.set_extra_state) is not Module.set_extra_state:
if extra_state_key in state_dict:
self.set_extra_state(state_dict[extra_state_key])
elif strict:
missing_keys.append(extra_state_key)
elif strict and (extra_state_key in state_dict):
unexpected_keys.append(extra_state_key)
if strict:
for key in state_dict.keys():
if key.startswith(prefix) and key != extra_state_key:
input_name = key[len(prefix):]
input_name = input_name.split('.', 1)[0] # get the name of param/buffer/child
if input_name not in self._modules and input_name not in local_state:
unexpected_keys.append(key)
def load_state_dict(self, state_dict: Mapping[str, Any],
strict: bool = True):
r"""Copies parameters and buffers from :attr:`state_dict` into
this module and its descendants. If :attr:`strict` is ``True``, then
the keys of :attr:`state_dict` must exactly match the keys returned
by this module's :meth:`~torch.nn.Module.state_dict` function.
Args:
state_dict (dict): a dict containing parameters and
persistent buffers.
strict (bool, optional): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``True``
Returns:
``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:
* **missing_keys** is a list of str containing the missing keys
* **unexpected_keys** is a list of str containing the unexpected keys
Note:
If a parameter or buffer is registered as ``None`` and its corresponding key
exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a
``RuntimeError``.
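Example (an illustrative sketch; ``ckpt.pth`` is a hypothetical checkpoint)::
>>> # xdoctest: +SKIP("undefined vars")
>>> incompatible = model.load_state_dict(torch.load("ckpt.pth"), strict=False)
>>> print(incompatible.missing_keys, incompatible.unexpected_keys)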
"""
if not isinstance(state_dict, Mapping):
raise TypeError("Expected state_dict to be dict-like, got {}.".format(type(state_dict)))
missing_keys: List[str] = []
unexpected_keys: List[str] = []
error_msgs: List[str] = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = OrderedDict(state_dict)
if metadata is not None:
# mypy isn't aware that "_metadata" exists in state_dict
state_dict._metadata = metadata # type: ignore[attr-defined]
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
# Note that the hook can modify missing_keys and unexpected_keys.
incompatible_keys = _IncompatibleKeys(missing_keys, unexpected_keys)
for hook in module._load_state_dict_post_hooks.values():
out = hook(module, incompatible_keys)
assert out is None, (
"Hooks registered with ``register_load_state_dict_post_hook`` are not"
"expected to return new values, if incompatible_keys need to be modified,"
"it should be done inplace."
)
load(self)
del load
if strict:
if len(unexpected_keys) > 0:
error_msgs.insert(
0, 'Unexpected key(s) in state_dict: {}. '.format(
', '.join('"{}"'.format(k) for k in unexpected_keys)))
if len(missing_keys) > 0:
error_msgs.insert(
0, 'Missing key(s) in state_dict: {}. '.format(
', '.join('"{}"'.format(k) for k in missing_keys)))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
self.__class__.__name__, "\n\t".join(error_msgs)))
return _IncompatibleKeys(missing_keys, unexpected_keys)
def _named_members(self, get_members_fn, prefix='', recurse=True):
r"""Helper method for yielding various names + members of modules."""
memo = set()
modules = self.named_modules(prefix=prefix) if recurse else [(prefix, self)]
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None or v in memo:
continue
memo.add(v)
name = module_prefix + ('.' if module_prefix else '') + k
yield name, v
def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
r"""Returns an iterator over module parameters.
This is typically passed to an optimizer.
Args:
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
Parameter: module parameter
Example::
>>> # xdoctest: +SKIP("undefined vars")
>>> for param in model.parameters():
>>> print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
"""
for name, param in self.named_parameters(recurse=recurse):
yield param
def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Parameter]]:
r"""Returns an iterator over module parameters, yielding both the
name of the parameter as well as the parameter itself.
Args:
prefix (str): prefix to prepend to all parameter names.
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
(str, Parameter): Tuple containing the name and parameter
Example::
>>> # xdoctest: +SKIP("undefined vars")
>>> for name, param in self.named_parameters():
>>> if name in ['bias']:
>>> print(param.size())
"""
gen = self._named_members(
lambda module: module._parameters.items(),
prefix=prefix, recurse=recurse)
for elem in gen:
yield elem
def buffers(self, recurse: bool = True) -> Iterator[Tensor]:
r"""Returns an iterator over module buffers.
Args:
recurse (bool): if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module.
Yields:
torch.Tensor: module buffer
Example::
>>> # xdoctest: +SKIP("undefined vars")
>>> for buf in model.buffers():
>>> print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
"""
for _, buf in self.named_buffers(recurse=recurse):
yield buf
def named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:
r"""Returns an iterator over module buffers, yielding both the
name of the buffer as well as the buffer itself.
Args:
prefix (str): prefix to prepend to all buffer names.
recurse (bool): if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module.
Yields:
(str, torch.Tensor): Tuple containing the name and buffer
Example::
>>> # xdoctest: +SKIP("undefined vars")
>>> for name, buf in self.named_buffers():
>>> if name in ['running_var']:
>>> print(buf.size())
"""
gen = self._named_members(
lambda module: module._buffers.items(),
prefix=prefix, recurse=recurse)
for elem in gen:
yield elem
def children(self) -> Iterator['Module']:
r"""Returns an iterator over immediate children modules.
Yields:
Module: a child module
"""
for name, module in self.named_children():
yield module
def named_children(self) -> Iterator[Tuple[str, 'Module']]:
r"""Returns an iterator over immediate children modules, yielding both
the name of the module as well as the module itself.
Yields:
(str, Module): Tuple containing a name and child module
Example::
>>> # xdoctest: +SKIP("undefined vars")
>>> for name, module in model.named_children():
>>> if name in ['conv4', 'conv5']:
>>> print(module)
"""
memo = set()
for name, module in self._modules.items():
if module is not None and module not in memo:
memo.add(module)
yield name, module
def modules(self) -> Iterator['Module']:
r"""Returns an iterator over all modules in the network.
Yields:
Module: a module in the network
Note:
Duplicate modules are returned only once. In the following
example, ``l`` will be returned only once.
Example::
>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
... print(idx, '->', m)
0 -> Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)
"""
for _, module in self.named_modules():
yield module
def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = '', remove_duplicate: bool = True):
r"""Returns an iterator over all modules in the network, yielding
both the name of the module as well as the module itself.
Args:
memo: a memo to store the set of modules already added to the result
prefix: a prefix that will be added to the name of the module
remove_duplicate: whether to remove the duplicated module instances in the result
or not
Yields:
(str, Module): Tuple of name and module
Note:
Duplicate modules are returned only once. In the following
example, ``l`` will be returned only once.
Example::
>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
... print(idx, '->', m)
0 -> ('', Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
"""
if memo is None:
memo = set()
if self not in memo:
if remove_duplicate:
memo.add(self)
yield prefix, self
for name, module in self._modules.items():
if module is None:
continue
submodule_prefix = prefix + ('.' if prefix else '') + name
for m in module.named_modules(memo, submodule_prefix, remove_duplicate):
yield m
def train(self: T, mode: bool = True) -> T:
r"""Sets the module in training mode.
This has an effect only on certain modules. See the documentation of
particular modules for details of their behaviors in training/evaluation
mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,
etc.
Args:
mode (bool): whether to set training mode (``True``) or evaluation
mode (``False``). Default: ``True``.
Returns:
Module: self
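Example (illustrative; ``model`` is assumed to exist)::
>>> # xdoctest: +SKIP("undefined vars")
>>> model.train()   # e.g. enables Dropout, BatchNorm uses batch statistics
>>> model.eval()    # e.g. disables Dropout, BatchNorm uses running statistics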
"""
if not isinstance(mode, bool):
raise ValueError("training mode is expected to be boolean")
self.training = mode
for module in self.children():
module.train(mode)
return self
def eval(self: T) -> T:
r"""Sets the module in evaluation mode.
This has an effect only on certain modules. See the documentation of
particular modules for details of their behaviors in training/evaluation
mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,
etc.
This is equivalent to :meth:`self.train(False) <torch.nn.Module.train>`.
See :ref:`locally-disable-grad-doc` for a comparison between
`.eval()` and several similar mechanisms that may be confused with it.
Returns:
Module: self
"""
return self.train(False)
def requires_grad_(self: T, requires_grad: bool = True) -> T:
r"""Change if autograd should record operations on parameters in this
module.
This method sets the parameters' :attr:`requires_grad` attributes
in-place.
This method is helpful for freezing part of the module for finetuning
or training parts of a model individually (e.g., GAN training).
See :ref:`locally-disable-grad-doc` for a comparison between
`.requires_grad_()` and several similar mechanisms that may be confused with it.
Args:
requires_grad (bool): whether autograd should record operations on
parameters in this module. Default: ``True``.
Returns:
Module: self
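Example (a common freezing sketch; ``backbone`` and ``head`` are hypothetical
submodules)::
>>> # xdoctest: +SKIP("undefined vars")
>>> model.backbone.requires_grad_(False)  # freeze the backbone
>>> model.head.requires_grad_(True)       # keep training the head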
"""
for p in self.parameters():
p.requires_grad_(requires_grad)
return self
def zero_grad(self, set_to_none: bool = False) -> None:
r"""Sets gradients of all model parameters to zero. See similar function
under :class:`torch.optim.Optimizer` for more context.
Args:
set_to_none (bool): instead of setting to zero, set the grads to None.
See :meth:`torch.optim.Optimizer.zero_grad` for details.
"""
if getattr(self, '_is_replica', False):
warnings.warn(
"Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "
"The parameters are copied (in a differentiable manner) from the original module. "
"This means they are not leaf nodes in autograd and so don't accumulate gradients. "
"If you need gradients in your forward method, consider using autograd.grad instead.")
for p in self.parameters():
if p.grad is not None:
if set_to_none:
p.grad = None
else:
if p.grad.grad_fn is not None:
p.grad.detach_()
else:
p.grad.requires_grad_(False)
p.grad.zero_()
def share_memory(self: T) -> T:
r"""See :meth:`torch.Tensor.share_memory_`"""
return self._apply(lambda t: t.share_memory_())
def _get_name(self):
return self.__class__.__name__
def extra_repr(self) -> str:
r"""Set the extra representation of the module
To print customized extra information, you should re-implement
this method in your own modules. Both single-line and multi-line
strings are acceptable.
"""
return ''
def __repr__(self):
# We treat the extra repr like the sub-module, one item per line
extra_lines = []
extra_repr = self.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split('\n')
child_lines = []
for key, module in self._modules.items():
mod_str = repr(module)
mod_str = _addindent(mod_str, 2)
child_lines.append('(' + key + '): ' + mod_str)
lines = extra_lines + child_lines
main_str = self._get_name() + '('
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += '\n ' + '\n '.join(lines) + '\n'
main_str += ')'
return main_str
def __dir__(self):
module_attrs = dir(self.__class__)
attrs = list(self.__dict__.keys())
parameters = list(self._parameters.keys())
modules = list(self._modules.keys())
buffers = list(self._buffers.keys())
keys = module_attrs + attrs + parameters + modules + buffers
# Eliminate attrs that are not legal Python variable names
keys = [key for key in keys if not key[0].isdigit()]
return sorted(keys)
def _replicate_for_data_parallel(self):
replica = self.__new__(type(self))
replica.__dict__ = self.__dict__.copy()
# Replicas do not have parameters themselves; they reference the parameters
# of the original module.
replica._parameters = OrderedDict()
replica._buffers = replica._buffers.copy()
replica._modules = replica._modules.copy()
replica._is_replica = True # type: ignore[assignment]
return replica
| pytorch-master | torch/nn/modules/module.py |
from .module import Module
from .. import functional as F
from torch import Tensor
__all__ = ['Dropout', 'Dropout1d', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout']
class _DropoutNd(Module):
__constants__ = ['p', 'inplace']
p: float
inplace: bool
def __init__(self, p: float = 0.5, inplace: bool = False) -> None:
super(_DropoutNd, self).__init__()
if p < 0 or p > 1:
raise ValueError("dropout probability has to be between 0 and 1, "
"but got {}".format(p))
self.p = p
self.inplace = inplace
def extra_repr(self) -> str:
return 'p={}, inplace={}'.format(self.p, self.inplace)
class Dropout(_DropoutNd):
r"""During training, randomly zeroes some of the elements of the input
tensor with probability :attr:`p` using samples from a Bernoulli
distribution. Each channel will be zeroed out independently on every forward
call.
This has proven to be an effective technique for regularization and
preventing the co-adaptation of neurons as described in the paper
`Improving neural networks by preventing co-adaptation of feature
detectors`_ .
Furthermore, the outputs are scaled by a factor of :math:`\frac{1}{1-p}` during
training. This means that during evaluation the module simply computes an
identity function.
Args:
p: probability of an element to be zeroed. Default: 0.5
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`. Input can be of any shape
- Output: :math:`(*)`. Output is of the same shape as input
Examples::
>>> m = nn.Dropout(p=0.2)
>>> input = torch.randn(20, 16)
>>> output = m(input)
.. _Improving neural networks by preventing co-adaptation of feature
detectors: https://arxiv.org/abs/1207.0580
"""
def forward(self, input: Tensor) -> Tensor:
return F.dropout(input, self.p, self.training, self.inplace)
class Dropout1d(_DropoutNd):
r"""Randomly zero out entire channels (a channel is a 1D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 1D tensor :math:`\text{input}[i, j]`).
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv1d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout1d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed. Default: 0.5
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, L)` or :math:`(C, L)`.
- Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input).
Examples::
>>> m = nn.Dropout1d(p=0.2)
>>> input = torch.randn(20, 16, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
return F.dropout1d(input, self.p, self.training, self.inplace)
class Dropout2d(_DropoutNd):
r"""Randomly zero out entire channels (a channel is a 2D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 2D tensor :math:`\text{input}[i, j]`).
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv2d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout2d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed. Default: 0.5
inplace (bool, optional): If set to ``True``, will do this operation
in-place
.. warning ::
Due to historical reasons, this class will perform 1D channel-wise dropout
for 3D inputs (as done by :class:`nn.Dropout1d`). Thus, it currently does NOT
support inputs without a batch dimension of shape :math:`(C, H, W)`. This
behavior will change in a future release to interpret 3D inputs as no-batch-dim
inputs. To maintain the old behavior, switch to :class:`nn.Dropout1d`.
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(N, C, L)`.
- Output: :math:`(N, C, H, W)` or :math:`(N, C, L)` (same shape as input).
Examples::
>>> m = nn.Dropout2d(p=0.2)
>>> input = torch.randn(20, 16, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
return F.dropout2d(input, self.p, self.training, self.inplace)
class Dropout3d(_DropoutNd):
r"""Randomly zero out entire channels (a channel is a 3D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 3D tensor :math:`\text{input}[i, j]`).
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv3d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout3d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed. Default: 0.5
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
- Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).
Examples::
>>> m = nn.Dropout3d(p=0.2)
>>> input = torch.randn(20, 16, 4, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
return F.dropout3d(input, self.p, self.training, self.inplace)
class AlphaDropout(_DropoutNd):
r"""Applies Alpha Dropout over the input.
Alpha Dropout is a type of Dropout that maintains the self-normalizing
property.
For an input with zero mean and unit standard deviation, the output of
Alpha Dropout maintains the original mean and standard deviation of the
input.
Alpha Dropout goes hand-in-hand with the SELU activation function, which ensures
that the outputs have zero mean and unit standard deviation.
During training, it randomly masks some of the elements of the input
tensor with probability *p* using samples from a bernoulli distribution.
The elements to be masked are randomized on every forward call, and scaled
and shifted to maintain zero mean and unit standard deviation.
During evaluation the module simply computes an identity function.
More details can be found in the paper `Self-Normalizing Neural Networks`_ .
Args:
p (float): probability of an element to be dropped. Default: 0.5
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(*)`. Input can be of any shape
- Output: :math:`(*)`. Output is of the same shape as input
Examples::
>>> m = nn.AlphaDropout(p=0.2)
>>> input = torch.randn(20, 16)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
"""
def forward(self, input: Tensor) -> Tensor:
return F.alpha_dropout(input, self.p, self.training)
class FeatureAlphaDropout(_DropoutNd):
r"""Randomly masks out entire channels (a channel is a feature map,
e.g. the :math:`j`-th channel of the :math:`i`-th sample in the batch input
is a tensor :math:`\text{input}[i, j]`) of the input tensor. Instead of
setting activations to zero, as in regular Dropout, the activations are set
to the negative saturation value of the SELU activation function. More details
can be found in the paper `Self-Normalizing Neural Networks`_ .
Each element will be masked independently for each sample on every forward
call with probability :attr:`p` using samples from a Bernoulli distribution.
The elements to be masked are randomized on every forward call, and scaled
and shifted to maintain zero mean and unit variance.
Usually the input comes from :class:`nn.AlphaDropout` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.FeatureAlphaDropout` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed. Default: 0.5
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
- Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).
Examples::
>>> m = nn.FeatureAlphaDropout(p=0.2)
>>> input = torch.randn(20, 16, 4, 32, 32)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
return F.feature_alpha_dropout(input, self.p, self.training)
| pytorch-master | torch/nn/modules/dropout.py |
# -*- coding: utf-8 -*-
import math
import warnings
import torch
from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter
from .. import functional as F
from .. import init
from .lazy import LazyModuleMixin
from .module import Module
from .utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch._torch_docs import reproducibility_notes
from ..common_types import _size_1_t, _size_2_t, _size_3_t
from typing import Optional, List, Tuple, Union
__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d',
'LazyConv1d', 'LazyConv2d', 'LazyConv3d', 'LazyConvTranspose1d', 'LazyConvTranspose2d',
'LazyConvTranspose3d']
convolution_notes = \
{"groups_note": r"""* :attr:`groups` controls the connections between inputs and outputs.
:attr:`in_channels` and :attr:`out_channels` must both be divisible by
:attr:`groups`. For example,
* At groups=1, all inputs are convolved to all outputs.
* At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels
and producing half the output channels, and both subsequently
concatenated.
* At groups= :attr:`in_channels`, each input channel is convolved with
its own set of filters (of size
:math:`\frac{\text{out\_channels}}{\text{in\_channels}}`).""",
"depthwise_separable_note": r"""When `groups == in_channels` and `out_channels == K * in_channels`,
where `K` is a positive integer, this operation is also known as a "depthwise convolution".
In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
:math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`."""} # noqa: B950
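# Illustrative example of the depthwise case described in `depthwise_separable_note`
# above (a sketch, not used elsewhere in this file): with in_channels=8 and a
# depthwise multiplier K=2,
#   nn.Conv1d(in_channels=8, out_channels=16, kernel_size=3, groups=8)
# gives each input channel its own pair of filters.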
class _ConvNd(Module):
__constants__ = ['stride', 'padding', 'dilation', 'groups',
'padding_mode', 'output_padding', 'in_channels',
'out_channels', 'kernel_size']
__annotations__ = {'bias': Optional[torch.Tensor]}
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:
...
_in_channels: int
_reversed_padding_repeated_twice: List[int]
out_channels: int
kernel_size: Tuple[int, ...]
stride: Tuple[int, ...]
padding: Union[str, Tuple[int, ...]]
dilation: Tuple[int, ...]
transposed: bool
output_padding: Tuple[int, ...]
groups: int
padding_mode: str
weight: Tensor
bias: Optional[Tensor]
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: Tuple[int, ...],
stride: Tuple[int, ...],
padding: Tuple[int, ...],
dilation: Tuple[int, ...],
transposed: bool,
output_padding: Tuple[int, ...],
groups: int,
bias: bool,
padding_mode: str,
device=None,
dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(_ConvNd, self).__init__()
if groups <= 0:
raise ValueError('groups must be a positive integer')
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
valid_padding_strings = {'same', 'valid'}
if isinstance(padding, str):
if padding not in valid_padding_strings:
raise ValueError(
"Invalid padding string {!r}, should be one of {}".format(
padding, valid_padding_strings))
if padding == 'same' and any(s != 1 for s in stride):
raise ValueError("padding='same' is not supported for strided convolutions")
valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
if padding_mode not in valid_padding_modes:
raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
valid_padding_modes, padding_mode))
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
self.padding_mode = padding_mode
# `_reversed_padding_repeated_twice` is the padding to be passed to
# `F.pad` if needed (e.g., for non-zero padding types that are
# implemented as two ops: padding + conv). `F.pad` accepts paddings in
the reverse order of the dimensions.
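# Illustrative examples (not exhaustive): for padding=(1, 2) the value is
# [2, 2, 1, 1], i.e. the last spatial dimension comes first, as `F.pad` expects;
# for padding='same' with kernel_size=3 and dilation=1, each spatial dimension
# gets one element of padding on each side.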
if isinstance(self.padding, str):
self._reversed_padding_repeated_twice = [0, 0] * len(kernel_size)
if padding == 'same':
for d, k, i in zip(dilation, kernel_size,
range(len(kernel_size) - 1, -1, -1)):
total_padding = d * (k - 1)
left_pad = total_padding // 2
self._reversed_padding_repeated_twice[2 * i] = left_pad
self._reversed_padding_repeated_twice[2 * i + 1] = (
total_padding - left_pad)
else:
self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)
if transposed:
self.weight = Parameter(torch.empty(
(in_channels, out_channels // groups, *kernel_size), **factory_kwargs))
else:
self.weight = Parameter(torch.empty(
(out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
# Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
# uniform(-1/sqrt(k), 1/sqrt(k)), where k = weight.size(1) * prod(*kernel_size)
# For more details see: https://github.com/pytorch/pytorch/issues/15314#issuecomment-477448573
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
if fan_in != 0:
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
return s.format(**self.__dict__)
def __setstate__(self, state):
super(_ConvNd, self).__setstate__(state)
if not hasattr(self, 'padding_mode'):
self.padding_mode = 'zeros'
class Conv1d(_ConvNd):
__doc__ = r"""Applies a 1D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C_{\text{in}}, L)` and output :math:`(N, C_{\text{out}}, L_{\text{out}})` can be
precisely described as:
.. math::
\text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
\sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{\text{out}_j}, k)
\star \text{input}(N_i, k)
where :math:`\star` is the valid `cross-correlation`_ operator,
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`L` is a length of signal sequence.
""" + r"""
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation, a single
number or a one-element tuple.
* :attr:`padding` controls the amount of padding applied to the input. It
can be either a string {{'valid', 'same'}} or a tuple of ints giving the
amount of implicit padding applied on both sides.
* :attr:`dilation` controls the spacing between the kernel points; also
known as the à trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
{groups_note}
Note:
{depthwise_separable_note}
Note:
{cudnn_reproducibility_note}
Note:
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
Note:
This module supports complex data types i.e. ``complex32, complex64, complex128``.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to both sides of
the input. Default: 0
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel
elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
""".format(**reproducibility_notes, **convolution_notes) + r"""
Shape:
- Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
- Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where
.. math::
L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{dilation}
\times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{out\_channels},
\frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`
bias (Tensor): the learnable bias of the module of shape
(out_channels). If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`
Examples::
>>> m = nn.Conv1d(16, 33, 3, stride=2)
>>> input = torch.randn(20, 16, 50)
>>> output = m(input)
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: Union[str, _size_1_t] = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros', # TODO: refine this type
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
# we create new variables below to make mypy happy since kernel_size has
# type Union[int, Tuple[int]] and kernel_size_ has type Tuple[int]
kernel_size_ = _single(kernel_size)
stride_ = _single(stride)
padding_ = padding if isinstance(padding, str) else _single(padding)
dilation_ = _single(dilation)
super(Conv1d, self).__init__(
in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
False, _single(0), groups, bias, padding_mode, **factory_kwargs)
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
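# For non-'zeros' padding modes, the padding is applied explicitly with F.pad
# (using the pre-computed reversed/duplicated padding) and the convolution
# itself then runs with zero implicit padding.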
if self.padding_mode != 'zeros':
return F.conv1d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, bias, self.stride,
_single(0), self.dilation, self.groups)
return F.conv1d(input, weight, bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
return self._conv_forward(input, self.weight, self.bias)
class Conv2d(_ConvNd):
__doc__ = r"""Applies a 2D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})`
can be precisely described as:
.. math::
\text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
\sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)
where :math:`\star` is the valid 2D `cross-correlation`_ operator,
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`H` is a height of input planes in pixels, and :math:`W` is
width in pixels.
""" + r"""
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation, a single
number or a tuple.
* :attr:`padding` controls the amount of padding applied to the input. It
can be either a string {{'valid', 'same'}} or a tuple of ints giving the
amount of implicit padding applied on both sides.
* :attr:`dilation` controls the spacing between the kernel points; also
known as the à trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Note:
{depthwise_separable_note}
Note:
{cudnn_reproducibility_note}
Note:
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
Note:
This module supports complex data types i.e. ``complex32, complex64, complex128``.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to all four sides of
the input. Default: 0
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
""".format(**reproducibility_notes, **convolution_notes) + r"""
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape
(out_channels). If :attr:`bias` is ``True``,
then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
Examples::
>>> # With square kernels and equal stride
>>> m = nn.Conv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> # non-square kernels and unequal stride and with padding and dilation
>>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
>>> input = torch.randn(20, 16, 50, 100)
>>> output = m(input)
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: Union[str, _size_2_t] = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros', # TODO: refine this type
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size_ = _pair(kernel_size)
stride_ = _pair(stride)
padding_ = padding if isinstance(padding, str) else _pair(padding)
dilation_ = _pair(dilation)
super(Conv2d, self).__init__(
in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
False, _pair(0), groups, bias, padding_mode, **factory_kwargs)
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
return self._conv_forward(input, self.weight, self.bias)
class Conv3d(_ConvNd):
__doc__ = r"""Applies a 3D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, D, H, W)`
and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` can be precisely described as:
.. math::
out(N_i, C_{out_j}) = bias(C_{out_j}) +
\sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \star input(N_i, k)
where :math:`\star` is the valid 3D `cross-correlation`_ operator
""" + r"""
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation.
* :attr:`padding` controls the amount of padding applied to the input. It
can be either a string {{'valid', 'same'}} or a tuple of ints giving the
amount of implicit padding applied on both sides.
* :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimension
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Note:
{depthwise_separable_note}
Note:
{cudnn_reproducibility_note}
Note:
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
Note:
This module supports complex data types i.e. ``complex32, complex64, complex128``.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to all six sides of
the input. Default: 0
padding_mode (str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
""".format(**reproducibility_notes, **convolution_notes) + r"""
Shape:
- Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`,
where
.. math::
D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2]
\times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
Examples::
>>> # With square kernels and equal stride
>>> m = nn.Conv3d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
>>> input = torch.randn(20, 16, 10, 50, 100)
>>> output = m(input)
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: Union[str, _size_3_t] = 0,
dilation: _size_3_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size_ = _triple(kernel_size)
stride_ = _triple(stride)
padding_ = padding if isinstance(padding, str) else _triple(padding)
dilation_ = _triple(dilation)
super(Conv3d, self).__init__(
in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
False, _triple(0), groups, bias, padding_mode, **factory_kwargs)
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
if self.padding_mode != "zeros":
return F.conv3d(
F.pad(
input, self._reversed_padding_repeated_twice, mode=self.padding_mode
),
weight,
bias,
self.stride,
_triple(0),
self.dilation,
self.groups,
)
return F.conv3d(
input, weight, bias, self.stride, self.padding, self.dilation, self.groups
)
def forward(self, input: Tensor) -> Tensor:
return self._conv_forward(input, self.weight, self.bias)
class _ConvTransposeNd(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups, bias, padding_mode, device=None, dtype=None) -> None:
if padding_mode != 'zeros':
raise ValueError('Only "zeros" padding mode is supported for {}'.format(self.__class__.__name__))
factory_kwargs = {'device': device, 'dtype': dtype}
super(_ConvTransposeNd, self).__init__(
in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups, bias, padding_mode, **factory_kwargs)
# dilation being an optional parameter is for backwards
# compatibility
def _output_padding(self, input: Tensor, output_size: Optional[List[int]],
stride: List[int], padding: List[int], kernel_size: List[int],
num_spatial_dims: int, dilation: Optional[List[int]] = None) -> List[int]:
if output_size is None:
ret = _single(self.output_padding)  # converting to a list if it was not already
else:
has_batch_dim = input.dim() == num_spatial_dims + 2
num_non_spatial_dims = 2 if has_batch_dim else 1
if len(output_size) == num_non_spatial_dims + num_spatial_dims:
output_size = output_size[num_non_spatial_dims:]
if len(output_size) != num_spatial_dims:
raise ValueError(
"ConvTranspose{}D: for {}D input, output_size must have {} or {} elements (got {})"
.format(num_spatial_dims, input.dim(), num_spatial_dims,
num_non_spatial_dims + num_spatial_dims, len(output_size)))
min_sizes = torch.jit.annotate(List[int], [])
max_sizes = torch.jit.annotate(List[int], [])
for d in range(num_spatial_dims):
dim_size = ((input.size(d + num_non_spatial_dims) - 1) * stride[d] -
2 * padding[d] +
(dilation[d] if dilation is not None else 1) * (kernel_size[d] - 1) + 1)
min_sizes.append(dim_size)
max_sizes.append(min_sizes[d] + stride[d] - 1)
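# min_sizes[d] is the spatial size produced with output_padding == 0; since
# output_padding may range over [0, stride - 1], any requested size up to
# min_sizes[d] + stride[d] - 1 is reachable for that dimension.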
for i in range(len(output_size)):
size = output_size[i]
min_size = min_sizes[i]
max_size = max_sizes[i]
if size < min_size or size > max_size:
raise ValueError((
"requested an output size of {}, but valid sizes range "
"from {} to {} (for an input of {})").format(
output_size, min_sizes, max_sizes, input.size()[2:]))
res = torch.jit.annotate(List[int], [])
for d in range(num_spatial_dims):
res.append(output_size[d] - min_sizes[d])
ret = res
return ret
class ConvTranspose1d(_ConvTransposeNd):
__doc__ = r"""Applies a 1D transposed convolution operator over an input image
composed of several input planes.
This module can be seen as the gradient of Conv1d with respect to its input.
It is also known as a fractionally-strided convolution or
a deconvolution (although it is not an actual deconvolution operation as it does
not compute a true inverse of convolution). For more information, see the visualizations
`here`_ and the `Deconvolutional Networks`_ paper.
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation.
* :attr:`padding` controls the amount of implicit zero padding on both
sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
below for details.
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
* :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
Note:
The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
amount of zero padding to both sizes of the input. This is set so that
when a :class:`~torch.nn.Conv1d` and a :class:`~torch.nn.ConvTranspose1d`
are initialized with same parameters, they are inverses of each other in
regard to the input and output shapes. However, when ``stride > 1``,
:class:`~torch.nn.Conv1d` maps multiple input shapes to the same output
shape. :attr:`output_padding` is provided to resolve this ambiguity by
effectively increasing the calculated output shape on one side. Note
that :attr:`output_padding` is only used to find output shape, but does
not actually add zero-padding to output.
Note:
In some circumstances when using the CUDA backend with CuDNN, this operator
may select a nondeterministic algorithm to increase performance. If this is
undesirable, you can try to make the operation deterministic (potentially at
a performance cost) by setting ``torch.backends.cudnn.deterministic =
True``.
Please see the notes on :doc:`/notes/randomness` for background.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
""".format(**reproducibility_notes, **convolution_notes) + r"""
Shape:
- Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
- Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where
.. math::
L_{out} = (L_{in} - 1) \times \text{stride} - 2 \times \text{padding} + \text{dilation}
\times (\text{kernel\_size} - 1) + \text{output\_padding} + 1
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
:math:`\text{kernel\_size})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`
bias (Tensor): the learnable bias of the module of shape (out_channels).
If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`
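Examples (a minimal usage sketch; the shapes below are illustrative)::
    >>> m = nn.ConvTranspose1d(16, 33, 3, stride=2)
    >>> input = torch.randn(20, 16, 50)
    >>> output = m(input)  # length 50 is upsampled to length 101 with these settings
    >>> # the exact output length can also be requested via output_size
    >>> downsample = nn.Conv1d(16, 16, 3, stride=2, padding=1)
    >>> upsample = nn.ConvTranspose1d(16, 16, 3, stride=2, padding=1)
    >>> h = downsample(torch.randn(1, 16, 12))
    >>> output = upsample(h, output_size=[12])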
.. _`here`:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
.. _`Deconvolutional Networks`:
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
output_padding: _size_1_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_1_t = 1,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
output_padding = _single(output_padding)
super(ConvTranspose1d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode, **factory_kwargs)
def forward(self, input: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for ConvTranspose1d')
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
num_spatial_dims = 1
output_padding = self._output_padding(
input, output_size, self.stride, self.padding, self.kernel_size, # type: ignore[arg-type]
num_spatial_dims, self.dilation) # type: ignore[arg-type]
return F.conv_transpose1d(
input, self.weight, self.bias, self.stride, self.padding,
output_padding, self.groups, self.dilation)
class ConvTranspose2d(_ConvTransposeNd):
__doc__ = r"""Applies a 2D transposed convolution operator over an input image
composed of several input planes.
This module can be seen as the gradient of Conv2d with respect to its input.
It is also known as a fractionally-strided convolution or
a deconvolution (although it is not an actual deconvolution operation as it does
not compute a true inverse of convolution). For more information, see the visualizations
`here`_ and the `Deconvolutional Networks`_ paper.
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation.
* :attr:`padding` controls the amount of implicit zero padding on both
sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
below for details.
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
* :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimensions
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Note:
The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
amount of zero padding to both sizes of the input. This is set so that
when a :class:`~torch.nn.Conv2d` and a :class:`~torch.nn.ConvTranspose2d`
are initialized with same parameters, they are inverses of each other in
regard to the input and output shapes. However, when ``stride > 1``,
:class:`~torch.nn.Conv2d` maps multiple input shapes to the same output
shape. :attr:`output_padding` is provided to resolve this ambiguity by
effectively increasing the calculated output shape on one side. Note
that :attr:`output_padding` is only used to find output shape, but does
not actually add zero-padding to output.
Note:
{cudnn_reproducibility_note}
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
""".format(**reproducibility_notes, **convolution_notes) + r"""
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where
.. math::
H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
.. math::
W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels)
If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
Examples::
>>> # With square kernels and equal stride
>>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = torch.randn(20, 16, 50, 100)
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> input = torch.randn(1, 16, 12, 12)
>>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
>>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
.. _`here`:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
.. _`Deconvolutional Networks`:
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
output_padding: _size_2_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_2_t = 1,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
output_padding = _pair(output_padding)
super(ConvTranspose2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode, **factory_kwargs)
def forward(self, input: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for ConvTranspose2d')
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
num_spatial_dims = 2
output_padding = self._output_padding(
input, output_size, self.stride, self.padding, self.kernel_size, # type: ignore[arg-type]
num_spatial_dims, self.dilation) # type: ignore[arg-type]
return F.conv_transpose2d(
input, self.weight, self.bias, self.stride, self.padding,
output_padding, self.groups, self.dilation)
class ConvTranspose3d(_ConvTransposeNd):
__doc__ = r"""Applies a 3D transposed convolution operator over an input image composed of several input
planes.
The transposed convolution operator multiplies each input value element-wise by a learnable kernel,
and sums over the outputs from all input feature planes.
This module can be seen as the gradient of Conv3d with respect to its input.
It is also known as a fractionally-strided convolution or
a deconvolution (although it is not an actual deconvolution operation as it does
not compute a true inverse of convolution). For more information, see the visualizations
`here`_ and the `Deconvolutional Networks`_ paper.
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
* :attr:`stride` controls the stride for the cross-correlation.
* :attr:`padding` controls the amount of implicit zero padding on both
sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
below for details.
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
* :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimensions
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Note:
The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
amount of zero padding to both sizes of the input. This is set so that
when a :class:`~torch.nn.Conv3d` and a :class:`~torch.nn.ConvTranspose3d`
are initialized with same parameters, they are inverses of each other in
regard to the input and output shapes. However, when ``stride > 1``,
:class:`~torch.nn.Conv3d` maps multiple input shapes to the same output
shape. :attr:`output_padding` is provided to resolve this ambiguity by
effectively increasing the calculated output shape on one side. Note
that :attr:`output_padding` is only used to find output shape, but does
not actually add zero-padding to output.
Note:
{cudnn_reproducibility_note}
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
""".format(**reproducibility_notes, **convolution_notes) + r"""
Shape:
- Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or
:math:`(C_{out}, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = (D_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
\times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
.. math::
H_{out} = (H_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
\times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
.. math::
W_{out} = (W_{in} - 1) \times \text{stride}[2] - 2 \times \text{padding}[2] + \text{dilation}[2]
\times (\text{kernel\_size}[2] - 1) + \text{output\_padding}[2] + 1
Attributes:
weight (Tensor): the learnable weights of the module of shape
:math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
:math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
The values of these weights are sampled from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
bias (Tensor): the learnable bias of the module of shape (out_channels)
If :attr:`bias` is ``True``, then the values of these weights are
sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
Examples::
>>> # With square kernels and equal stride
>>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))
>>> input = torch.randn(20, 16, 10, 50, 100)
>>> output = m(input)
.. _`here`:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
.. _`Deconvolutional Networks`:
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: _size_3_t = 0,
output_padding: _size_3_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_3_t = 1,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
output_padding = _triple(output_padding)
super(ConvTranspose3d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode, **factory_kwargs)
def forward(self, input: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
if self.padding_mode != 'zeros':
raise ValueError('Only `zeros` padding mode is supported for ConvTranspose3d')
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
num_spatial_dims = 3
output_padding = self._output_padding(
input, output_size, self.stride, self.padding, self.kernel_size, # type: ignore[arg-type]
num_spatial_dims, self.dilation) # type: ignore[arg-type]
return F.conv_transpose3d(
input, self.weight, self.bias, self.stride, self.padding,
output_padding, self.groups, self.dilation)
# TODO: Deprecate and remove the following alias `_ConvTransposeMixin`.
#
# `_ConvTransposeMixin` was a mixin that has since been removed. It was meant to be used
# with `_ConvNd` to construct actual module classes that implement conv
# transpose ops:
#
# class MyConvTranspose(_ConvNd, _ConvTransposeMixin):
# ...
#
# In PyTorch, it has been replaced by `_ConvTransposeNd`, which is a proper
# subclass of `_ConvNd`. However, some user code in the wild still (incorrectly)
# use the internal class `_ConvTransposeMixin`. Hence, we provide this alias
# for BC, because it is cheap and easy for us to do so, even though
# `_ConvTransposeNd` is really not a mixin anymore (but multiple inheritance as
# above would still work).
class _ConvTransposeMixin(_ConvTransposeNd):
def __init__(self, *args, **kwargs):
warnings.warn(
"_ConvTransposeMixin is a deprecated internal class. "
"Please consider using public APIs.")
super(_ConvTransposeMixin, self).__init__(*args, **kwargs)
# TODO: Conv2dLocal
# TODO: Conv2dMap
# TODO: ConvTranspose2dMap
class _LazyConvXdMixin(LazyModuleMixin):
groups: int
transposed: bool
in_channels: int
out_channels: int
kernel_size: Tuple[int, ...]
weight: UninitializedParameter
bias: UninitializedParameter
def reset_parameters(self) -> None:
# has_uninitialized_params is defined in the parent class and uses a protocol on self
if not self.has_uninitialized_params() and self.in_channels != 0: # type: ignore[misc]
# "type:ignore[..]" is required because mypy thinks that "reset_parameters" is undefined
# in the superclass. It turns out that it is defined in _ConvNd, which is inherited by any class
# that also inherits _LazyConvXdMixin.
super().reset_parameters() # type: ignore[misc]
# Signature of "initialize_parameters" is incompatible with the definition in supertype LazyModuleMixin
def initialize_parameters(self, input) -> None: # type: ignore[override]
# defined by parent class but using a protocol
if self.has_uninitialized_params(): # type: ignore[misc]
self.in_channels = self._get_in_channels(input)
if self.in_channels % self.groups != 0:
raise ValueError('in_channels must be divisible by groups')
assert isinstance(self.weight, UninitializedParameter)
if self.transposed:
self.weight.materialize((
self.in_channels, self.out_channels // self.groups, *self.kernel_size))
else:
self.weight.materialize((
self.out_channels, self.in_channels // self.groups, *self.kernel_size))
if self.bias is not None:
assert isinstance(self.bias, UninitializedParameter)
self.bias.materialize((self.out_channels,))
self.reset_parameters()
# Function to extract in_channels from first input.
def _get_in_channels(self, input: Tensor) -> int:
num_spatial_dims = self._get_num_spatial_dims()
num_dims_no_batch = num_spatial_dims + 1 # +1 for channels dim
num_dims_batch = num_dims_no_batch + 1
if input.dim() not in (num_dims_no_batch, num_dims_batch):
raise RuntimeError("Expected {}D (unbatched) or {}D (batched) input to {}, but "
"got input of size: {}".format(num_dims_no_batch, num_dims_batch,
self.__class__.__name__, input.shape))
return input.shape[1] if input.dim() == num_dims_batch else input.shape[0]
# Function to return the number of spatial dims expected for inputs to the module.
# This is expected to be implemented by subclasses.
def _get_num_spatial_dims(self) -> int:
raise NotImplementedError()
# Conv1d defines `weight` as a Tensor but the derived class defines it as an UninitializedParameter
class LazyConv1d(_LazyConvXdMixin, Conv1d): # type: ignore[misc]
r"""A :class:`torch.nn.Conv1d` module with lazy initialization of
the ``in_channels`` argument of the :class:`Conv1d` that is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 0
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel
elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
.. seealso:: :class:`torch.nn.Conv1d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = Conv1d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
dilation,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
padding_mode,
**factory_kwargs
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 1
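# Minimal usage sketch (illustrative only, not part of the original module):
# LazyConv1d infers ``in_channels`` from the channel dimension of the first input.
#
#   conv = LazyConv1d(out_channels=8, kernel_size=3)
#   x = torch.randn(4, 16, 50)       # (batch, channels, length)
#   out = conv(x)                    # first forward materializes the weight
#   assert conv.in_channels == 16    # inferred from x.size(1)
#   assert out.shape == (4, 8, 48)   # 50 - 3 + 1 with stride 1 and no padding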
# LazyConv2d defines weight as a Tensor but the derived class defines it as UninitializedParameter
class LazyConv2d(_LazyConvXdMixin, Conv2d): # type: ignore[misc]
r"""A :class:`torch.nn.Conv2d` module with lazy initialization of
the ``in_channels`` argument of the :class:`Conv2d` that is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 0
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel
elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
.. seealso:: :class:`torch.nn.Conv2d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = Conv2d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros', # TODO: refine this type
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
dilation,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
padding_mode,
**factory_kwargs
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 2
# LazyConv3d defines weight as a Tensor but the derived class defines it as UninitializedParameter
class LazyConv3d(_LazyConvXdMixin, Conv3d): # type: ignore[misc]
r"""A :class:`torch.nn.Conv3d` module with lazy initialization of
the ``in_channels`` argument of the :class:`Conv3d` that is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 0
padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'zeros'``
dilation (int or tuple, optional): Spacing between kernel
elements. Default: 1
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the
output. Default: ``True``
.. seealso:: :class:`torch.nn.Conv3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = Conv3d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: _size_3_t = 0,
dilation: _size_3_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
dilation,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
padding_mode,
**factory_kwargs
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 3
# LazyConvTranspose1d defines weight as a Tensor but the derived class defines it as UninitializedParameter
class LazyConvTranspose1d(_LazyConvXdMixin, ConvTranspose1d): # type: ignore[misc]
r"""A :class:`torch.nn.ConvTranspose1d` module with lazy initialization of
the ``in_channels`` argument of the :class:`ConvTranspose1d` that is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
.. seealso:: :class:`torch.nn.ConvTranspose1d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = ConvTranspose1d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
output_padding: _size_1_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_1_t = 1,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
output_padding,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
dilation,
padding_mode,
**factory_kwargs
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 1
# LazyConvTranspose2d defines weight as a Tensor but the derived class defines it as UninitializedParameter
class LazyConvTranspose2d(_LazyConvXdMixin, ConvTranspose2d): # type: ignore[misc]
r"""A :class:`torch.nn.ConvTranspose2d` module with lazy initialization of
the ``in_channels`` argument of the :class:`ConvTranspose2d` that is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
.. seealso:: :class:`torch.nn.ConvTranspose2d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = ConvTranspose2d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
output_padding: _size_2_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_2_t = 1,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
output_padding,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
dilation,
padding_mode,
**factory_kwargs
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 2
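# Minimal usage sketch (illustrative only, not part of the original module):
# the transposed variants infer ``in_channels`` the same way, but the weight is
# materialized with shape (in_channels, out_channels // groups, *kernel_size).
#
#   deconv = LazyConvTranspose2d(out_channels=16, kernel_size=2, stride=2)
#   x = torch.randn(1, 8, 14, 14)    # (batch, channels, height, width)
#   out = deconv(x)                  # in_channels is inferred as 8 on the first forward
#   assert out.shape == (1, 16, 28, 28)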
# LazyConvTranspose3d defines weight as a Tensor but the derived class defines it as UninitializedParameter
class LazyConvTranspose3d(_LazyConvXdMixin, ConvTranspose3d): # type: ignore[misc]
r"""A :class:`torch.nn.ConvTranspose3d` module with lazy initialization of
the ``in_channels`` argument of the :class:`ConvTranspose3d` that is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
will be added to both sides of each dimension in the input. Default: 0
output_padding (int or tuple, optional): Additional size added to one side
of each dimension in the output shape. Default: 0
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
.. seealso:: :class:`torch.nn.ConvTranspose3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
"""
# The super class defines this variable as None. "type: ignore[..]" is required
# since we are redefining the variable.
cls_to_become = ConvTranspose3d # type: ignore[assignment]
def __init__(
self,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: _size_3_t = 0,
output_padding: _size_3_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_3_t = 1,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(
0,
0,
kernel_size,
stride,
padding,
output_padding,
groups,
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
False,
dilation,
padding_mode,
**factory_kwargs
)
self.weight = UninitializedParameter(**factory_kwargs)
self.out_channels = out_channels
if bias:
self.bias = UninitializedParameter(**factory_kwargs)
def _get_num_spatial_dims(self) -> int:
return 3
| pytorch-master | torch/nn/modules/conv.py |
import itertools
from typing_extensions import Protocol
import warnings
import torch
from ..parameter import is_lazy
__all__ = ['LazyModuleMixin']
class _LazyProtocol(Protocol):
"""This is to avoid errors with mypy checks for
The attributes in a mixin:
https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes
"""
def _register_load_state_dict_pre_hook(self, hook):
...
def register_forward_pre_hook(self, hook):
...
def _lazy_load_hook(
self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
...
def _get_name(self):
...
def _infer_parameters(self, module, input):
...
@property
def _parameters(self):
...
@property
def _buffers(self):
...
@property
def _non_persistent_buffers_set(self):
...
@property
def _load_hook(self):
...
@property
def _initialize_hook(self):
...
class LazyModuleMixin:
r"""A mixin for modules that lazily initialize parameters, also known as "lazy modules."
.. warning::
Lazy modules are an experimental new feature under active development,
and their API is likely to change.
Modules that lazily initialize parameters, or "lazy modules",
derive the shapes of their parameters from the first input(s)
to their forward method. Until that first forward they contain
:class:`torch.nn.UninitializedParameter` s that should not be accessed
or used, and afterward they contain regular :class:`torch.nn.Parameter` s.
Lazy modules are convenient since they don't require computing some
module arguments, like the :attr:`in_features` argument of a
typical :class:`torch.nn.Linear`.
After construction, networks with lazy modules should first
be converted to the desired dtype and placed on the expected device.
This is because lazy modules only perform shape inference so the usual dtype
and device placement behavior applies.
The lazy modules should then perform "dry runs" to initialize all the components in the module.
These "dry runs" send inputs of the correct size, dtype, and device through
the network and to each one of its lazy modules. After this the network can be used as usual.
>>> # xdoctest: +SKIP
>>> class LazyMLP(torch.nn.Module):
... def __init__(self):
... super().__init__()
... self.fc1 = torch.nn.LazyLinear(10)
... self.relu1 = torch.nn.ReLU()
... self.fc2 = torch.nn.LazyLinear(1)
... self.relu2 = torch.nn.ReLU()
...
... def forward(self, input):
... x = self.relu1(self.fc1(input))
... y = self.relu2(self.fc2(x))
... return y
>>> # constructs a network with lazy modules
>>> lazy_mlp = LazyMLP()
>>> # transforms the network's device and dtype
>>> # NOTE: these transforms can and should be applied after construction and before any 'dry runs'
>>> lazy_mlp = lazy_mlp.cuda().double()
>>> lazy_mlp
LazyMLP(
  (fc1): LazyLinear(in_features=0, out_features=10, bias=True)
(relu1): ReLU()
(fc2): LazyLinear(in_features=0, out_features=1, bias=True)
(relu2): ReLU()
)
>>> # performs a dry run to initialize the network's lazy modules
>>> lazy_mlp(torch.ones(10,10).cuda())
>>> # after initialization, LazyLinear modules become regular Linear modules
>>> lazy_mlp
LazyMLP(
(fc1): Linear(in_features=10, out_features=10, bias=True)
(relu1): ReLU()
(fc2): Linear(in_features=10, out_features=1, bias=True)
(relu2): ReLU()
)
>>> # attaches an optimizer, since parameters can now be used as usual
>>> optim = torch.optim.SGD(lazy_mlp.parameters(), lr=0.01)
A final caveat when using lazy modules is that the order of initialization of a network's
parameters may change, since the lazy modules are always initialized after other modules.
For example, if the LazyMLP class defined above had a :class:`torch.nn.LazyLinear` module
first and then a regular :class:`torch.nn.Linear` second, the second module would be
initialized on construction and the first module would be initialized during the first dry run.
This can cause the parameters of a network using lazy modules to be initialized differently
than the parameters of a network without lazy modules as the order of parameter initializations,
which often depends on a stateful random number generator, is different.
Check :doc:`/notes/randomness` for more details.
Lazy modules can be serialized with a state dict like other modules. For example:
>>> lazy_mlp = LazyMLP()
>>> # The state dict shows the uninitialized parameters
>>> lazy_mlp.state_dict()
OrderedDict([('fc1.weight', Uninitialized parameter),
('fc1.bias',
tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30,
4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])),
('fc2.weight', Uninitialized parameter),
('fc2.bias', tensor([0.0019]))])
Lazy modules can load regular :class:`torch.nn.Parameter` s (i.e. you can serialize/deserialize
initialized LazyModules and they will remain initialized)
>>> full_mlp = LazyMLP()
>>> # Dry run to initialize another module
>>> full_mlp.forward(torch.ones(10, 1))
>>> # Load an initialized state into a lazy module
>>> lazy_mlp.load_state_dict(full_mlp.state_dict())
>>> # The state dict now holds valid values
>>> lazy_mlp.state_dict()
OrderedDict([('fc1.weight',
tensor([[-0.3837],
[ 0.0907],
[ 0.6708],
[-0.5223],
[-0.9028],
[ 0.2851],
[-0.4537],
[ 0.6813],
[ 0.5766],
[-0.8678]])),
('fc1.bias',
tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30,
4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])),
('fc2.weight',
tensor([[ 0.1320, 0.2938, 0.0679, 0.2793, 0.1088, -0.1795, -0.2301, 0.2807,
0.2479, 0.1091]])),
('fc2.bias', tensor([0.0019]))])
Note, however, that the loaded parameters will not be replaced when doing a "dry run" if they are initialized
when the state is loaded. This prevents using initialized modules in different contexts.
"""
# modules inheriting from this will change their __class__ to the specified
# one after they are fully initialized
cls_to_become = None
def __init__(self: _LazyProtocol, *args, **kwargs):
# Mypy doesn't like this super call in a mixin
super().__init__(*args, **kwargs) # type: ignore[misc]
self._load_hook = self._register_load_state_dict_pre_hook(self._lazy_load_hook)
self._initialize_hook = self.register_forward_pre_hook(self._infer_parameters)
warnings.warn('Lazy modules are a new feature under heavy development '
'so changes to the API or functionality can happen at any moment.')
def _save_to_state_dict(self: _LazyProtocol, destination, prefix, keep_vars):
# This should be ideally implemented as a hook,
# but we should override `detach` in the UninitializedParameter to return itself
# which is not clean
for name, param in self._parameters.items():
if param is not None:
if not (is_lazy(param) or keep_vars):
param = param.detach()
destination[prefix + name] = param
for name, buf in self._buffers.items():
if buf is not None and name not in self._non_persistent_buffers_set:
if not (is_lazy(buf) or keep_vars):
buf = buf.detach()
destination[prefix + name] = buf
def _lazy_load_hook(
self: _LazyProtocol, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""load_state_dict pre-hook function for lazy buffers and parameters.
The purpose of this hook is to adjust the current state and/or
``state_dict`` being loaded so that a module instance serialized in
both un/initialized state can be deserialized onto both un/initialized
module instance.
See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``
for the details of the hook specification.
"""
for name, param in itertools.chain(self._parameters.items(), self._buffers.items()):
key = prefix + name
if key in state_dict and param is not None:
input_param = state_dict[key]
if is_lazy(param):
# The current parameter is not initialized but the one being loaded is, so
# create a new parameter based on the uninitialized one
if not is_lazy(input_param):
with torch.no_grad():
param.materialize(input_param.shape)
def initialize_parameters(self: _LazyProtocol, *args, **kwargs):
r"""Initialize parameters according to the input batch properties.
This adds an interface to isolate parameter initialization from the
forward pass when doing parameter shape inference.
"""
raise NotImplementedError('initialize_parameters is not implemented for {}'.format(self.__class__.__name__))
def has_uninitialized_params(self: _LazyProtocol):
r"""Check if a module has parameters that are not initialized
"""
# This is to avoid the JIT tracking this parameter and forcing
# custom modules' __setstate__ to add it
params = self._parameters.values()
buffers = self._buffers.values()
for param in itertools.chain(params, buffers):
if is_lazy(param):
return True
return False
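# Minimal sketch of the lazy lifecycle (illustrative only, not part of the original module):
#
#   m = torch.nn.LazyLinear(4)
#   m.has_uninitialized_params()     # True before any forward pass
#   m(torch.randn(2, 3))             # dry run materializes weight and bias
#   type(m) is torch.nn.Linear       # True: cls_to_become swapped the class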
def _infer_parameters(self: _LazyProtocol, module, input):
r"""Infers the size and initializes the parameters according to the
provided input batch.
Given a module that contains parameters that were declared as
:class:`torch.nn.parameter.UninitializedParameter`, runs a forward pass
in the complete module using the provided input to initialize all the parameters
as needed.
The module is set into evaluation mode before running the forward pass in order
to avoid saving statistics or calculating gradients
"""
module.initialize_parameters(*input)
if module.has_uninitialized_params():
raise RuntimeError('module {} has not been fully initialized'.format(self._get_name()))
module._initialize_hook.remove()
module._load_hook.remove()
delattr(module, '_initialize_hook')
delattr(module, '_load_hook')
if module.cls_to_become is not None:
module.__class__ = module.cls_to_become
def _replicate_for_data_parallel(self: _LazyProtocol):
raise RuntimeError('Modules with uninitialized parameters can\'t be used with `DataParallel`. '
'Run a dummy forward pass to correctly initialize the modules')
| pytorch-master | torch/nn/modules/lazy.py |
import torch
import numbers
from torch.nn.parameter import Parameter
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
__all__ = ['LocalResponseNorm', 'CrossMapLRN2d', 'LayerNorm', 'GroupNorm']
class LocalResponseNorm(Module):
r"""Applies local response normalization over an input signal composed
of several input planes, where channels occupy the second dimension.
Applies normalization across channels.
.. math::
b_{c} = a_{c}\left(k + \frac{\alpha}{n}
\sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta}
Args:
size: amount of neighbouring channels used for normalization
alpha: multiplicative factor. Default: 0.0001
beta: exponent. Default: 0.75
k: additive factor. Default: 1
Shape:
- Input: :math:`(N, C, *)`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> lrn = nn.LocalResponseNorm(2)
>>> signal_2d = torch.randn(32, 5, 24, 24)
>>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7)
>>> output_2d = lrn(signal_2d)
>>> output_4d = lrn(signal_4d)
"""
__constants__ = ['size', 'alpha', 'beta', 'k']
size: int
alpha: float
beta: float
k: float
def __init__(self, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1.) -> None:
super(LocalResponseNorm, self).__init__()
self.size = size
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, input: Tensor) -> Tensor:
return F.local_response_norm(input, self.size, self.alpha, self.beta,
self.k)
def extra_repr(self):
return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
class CrossMapLRN2d(Module):
size: int
alpha: float
beta: float
k: float
def __init__(self, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1) -> None:
super(CrossMapLRN2d, self).__init__()
self.size = size
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, input: Tensor) -> Tensor:
return _cross_map_lrn2d.apply(input, self.size, self.alpha, self.beta,
self.k)
def extra_repr(self) -> str:
return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
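# Minimal usage sketch (illustrative only): CrossMapLRN2d normalizes each activation
# across nearby channels of a 4-D (N, C, H, W) input, in the style of the classic
# AlexNet local response normalization.
#
#   lrn = CrossMapLRN2d(size=5, alpha=1e-4, beta=0.75, k=2.0)
#   x = torch.randn(8, 64, 32, 32)
#   y = lrn(x)                       # output has the same shape as the input
#   assert y.shape == x.shape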
_shape_t = Union[int, List[int], Size]
class LayerNorm(Module):
r"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated over the last `D` dimensions, where `D`
is the dimension of :attr:`normalized_shape`. For example, if :attr:`normalized_shape`
is ``(3, 5)`` (a 2-dimensional shape), the mean and standard-deviation are computed over
the last 2 dimensions of the input (i.e. ``input.mean((-2, -1))``).
:math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
The standard-deviation is calculated via the biased estimator, equivalent to
``torch.var(input, unbiased=False)``.
.. note::
Unlike Batch Normalization and Instance Normalization, which applies
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, Layer Normalization applies per-element scale and
bias with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \times \text{normalized\_shape}[0] \times \text{normalized\_shape}[1]
\times \ldots \times \text{normalized\_shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Attributes:
weight: the learnable weights of the module of shape
:math:`\text{normalized\_shape}` when :attr:`elementwise_affine` is set to ``True``.
The values are initialized to 1.
bias: the learnable bias of the module of shape
:math:`\text{normalized\_shape}` when :attr:`elementwise_affine` is set to ``True``.
The values are initialized to 0.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> # NLP Example
>>> batch, sentence_length, embedding_dim = 20, 5, 10
>>> embedding = torch.randn(batch, sentence_length, embedding_dim)
>>> layer_norm = nn.LayerNorm(embedding_dim)
>>> # Activate module
>>> layer_norm(embedding)
>>>
>>> # Image Example
>>> N, C, H, W = 20, 5, 10, 10
>>> input = torch.randn(N, C, H, W)
>>> # Normalize over the last three dimensions (i.e. the channel and spatial dimensions)
>>> # as shown in the image below
>>> layer_norm = nn.LayerNorm([C, H, W])
>>> output = layer_norm(input)
.. image:: ../_static/img/nn/layer_norm.jpg
:scale: 50 %
"""
__constants__ = ['normalized_shape', 'eps', 'elementwise_affine']
normalized_shape: Tuple[int, ...]
eps: float
elementwise_affine: bool
def __init__(self, normalized_shape: _shape_t, eps: float = 1e-5, elementwise_affine: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
# mypy error: incompatible types in assignment
normalized_shape = (normalized_shape,) # type: ignore[assignment]
self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
self.bias = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input: Tensor) -> Tensor:
return F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, self.eps)
def extra_repr(self) -> str:
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
class GroupNorm(Module):
r"""Applies Group Normalization over a mini-batch of inputs as described in
the paper `Group Normalization <https://arxiv.org/abs/1803.08494>`__
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The input channels are separated into :attr:`num_groups` groups, each containing
``num_channels / num_groups`` channels. :attr:`num_channels` must be divisible by
:attr:`num_groups`. The mean and standard-deviation are calculated
separately over each group. :math:`\gamma` and :math:`\beta` are learnable
per-channel affine transform parameter vectors of size :attr:`num_channels` if
:attr:`affine` is ``True``.
The standard-deviation is calculated via the biased estimator, equivalent to
``torch.var(input, unbiased=False)``.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
num_groups (int): number of groups to separate the channels into
num_channels (int): number of channels expected in input
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, C, *)` where :math:`C=\text{num\_channels}`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 6, 10, 10)
>>> # Separate 6 channels into 3 groups
>>> m = nn.GroupNorm(3, 6)
>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
>>> m = nn.GroupNorm(6, 6)
>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
>>> m = nn.GroupNorm(1, 6)
>>> # Activating the module
>>> output = m(input)
"""
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine']
num_groups: int
num_channels: int
eps: float
affine: bool
def __init__(self, num_groups: int, num_channels: int, eps: float = 1e-5, affine: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(GroupNorm, self).__init__()
if num_channels % num_groups != 0:
raise ValueError('num_channels must be divisible by num_groups')
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.empty(num_channels, **factory_kwargs))
self.bias = Parameter(torch.empty(num_channels, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input: Tensor) -> Tensor:
return F.group_norm(
input, self.num_groups, self.weight, self.bias, self.eps)
def extra_repr(self) -> str:
return '{num_groups}, {num_channels}, eps={eps}, ' \
'affine={affine}'.format(**self.__dict__)
# TODO: ContrastiveNorm2d
# TODO: DivisiveNorm2d
# TODO: SubtractiveNorm2d
| pytorch-master | torch/nn/modules/normalization.py |
import math
import warnings
import numbers
from typing import List, Tuple, Optional, overload
import torch
from torch import Tensor
from .module import Module
from ..parameter import Parameter
from ..utils.rnn import PackedSequence
from .. import init
from ... import _VF
__all__ = ['RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell', 'LSTMCell', 'GRUCell']
_rnn_impls = {
'RNN_TANH': _VF.rnn_tanh,
'RNN_RELU': _VF.rnn_relu,
}
def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
return tensor.index_select(dim, permutation)
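# Illustrative sketch: _apply_permutation reorders the batch dimension (dim=1 by
# default) of a hidden state, e.g. to match the sorting used by a PackedSequence.
#
#   h = torch.arange(6.).reshape(1, 3, 2)    # (num_layers, batch, hidden)
#   perm = torch.tensor([2, 0, 1])
#   _apply_permutation(h, perm)              # batch entries reordered to 2, 0, 1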
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
warnings.warn("apply_permutation is deprecated, please use tensor.index_select(dim, permutation) instead")
return _apply_permutation(tensor, permutation, dim)
class RNNBase(Module):
__constants__ = ['mode', 'input_size', 'hidden_size', 'num_layers', 'bias',
'batch_first', 'dropout', 'bidirectional', 'proj_size']
__jit_unused_properties__ = ['all_weights']
mode: str
input_size: int
hidden_size: int
num_layers: int
bias: bool
batch_first: bool
dropout: float
bidirectional: bool
proj_size: int
def __init__(self, mode: str, input_size: int, hidden_size: int,
num_layers: int = 1, bias: bool = True, batch_first: bool = False,
dropout: float = 0., bidirectional: bool = False, proj_size: int = 0,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(RNNBase, self).__init__()
self.mode = mode
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = float(dropout)
self.bidirectional = bidirectional
self.proj_size = proj_size
num_directions = 2 if bidirectional else 1
if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
isinstance(dropout, bool):
raise ValueError("dropout should be a number in range [0, 1] "
"representing the probability of an element being "
"zeroed")
if dropout > 0 and num_layers == 1:
warnings.warn("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
"num_layers greater than 1, but got dropout={} and "
"num_layers={}".format(dropout, num_layers))
if proj_size < 0:
raise ValueError("proj_size should be a positive integer or zero to disable projections")
if proj_size >= hidden_size:
raise ValueError("proj_size has to be smaller than hidden_size")
if mode == 'LSTM':
gate_size = 4 * hidden_size
elif mode == 'GRU':
gate_size = 3 * hidden_size
elif mode == 'RNN_TANH':
gate_size = hidden_size
elif mode == 'RNN_RELU':
gate_size = hidden_size
else:
raise ValueError("Unrecognized RNN mode: " + mode)
self._flat_weights_names = []
self._all_weights = []
for layer in range(num_layers):
for direction in range(num_directions):
real_hidden_size = proj_size if proj_size > 0 else hidden_size
layer_input_size = input_size if layer == 0 else real_hidden_size * num_directions
w_ih = Parameter(torch.empty((gate_size, layer_input_size), **factory_kwargs))
w_hh = Parameter(torch.empty((gate_size, real_hidden_size), **factory_kwargs))
b_ih = Parameter(torch.empty(gate_size, **factory_kwargs))
# Second bias vector included for CuDNN compatibility. Only one
# bias vector is needed in the standard definition.
b_hh = Parameter(torch.empty(gate_size, **factory_kwargs))
layer_params: Tuple[Tensor, ...] = ()
if self.proj_size == 0:
if bias:
layer_params = (w_ih, w_hh, b_ih, b_hh)
else:
layer_params = (w_ih, w_hh)
else:
w_hr = Parameter(torch.empty((proj_size, hidden_size), **factory_kwargs))
if bias:
layer_params = (w_ih, w_hh, b_ih, b_hh, w_hr)
else:
layer_params = (w_ih, w_hh, w_hr)
suffix = '_reverse' if direction == 1 else ''
param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
if bias:
param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
if self.proj_size > 0:
param_names += ['weight_hr_l{}{}']
param_names = [x.format(layer, suffix) for x in param_names]
for name, param in zip(param_names, layer_params):
setattr(self, name, param)
self._flat_weights_names.extend(param_names)
self._all_weights.append(param_names)
self._flat_weights = [(lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn) for wn in self._flat_weights_names]
self.flatten_parameters()
self.reset_parameters()
def __setattr__(self, attr, value):
if hasattr(self, "_flat_weights_names") and attr in self._flat_weights_names:
# keep self._flat_weights up to date if you do self.weight = ...
idx = self._flat_weights_names.index(attr)
self._flat_weights[idx] = value
super(RNNBase, self).__setattr__(attr, value)
def flatten_parameters(self) -> None:
"""Resets parameter data pointer so that they can use faster code paths.
Right now, this works only if the module is on the GPU and cuDNN is enabled.
Otherwise, it's a no-op.
"""
# Short-circuits if _flat_weights is only partially instantiated
if len(self._flat_weights) != len(self._flat_weights_names):
return
for w in self._flat_weights:
if not isinstance(w, Tensor):
return
# Short-circuits if any tensor in self._flat_weights is not acceptable to cuDNN
# or the tensors in _flat_weights are of different dtypes
first_fw = self._flat_weights[0]
dtype = first_fw.dtype
for fw in self._flat_weights:
if (not isinstance(fw.data, Tensor) or not (fw.data.dtype == dtype) or
not fw.data.is_cuda or
not torch.backends.cudnn.is_acceptable(fw.data)):
return
# If any parameters alias, we fall back to the slower, copying code path. This is
# a sufficient check, because overlapping parameter buffers that don't completely
# alias would break the assumptions of the uniqueness check in
# Module.named_parameters().
unique_data_ptrs = set(p.data_ptr() for p in self._flat_weights)
if len(unique_data_ptrs) != len(self._flat_weights):
return
with torch.cuda.device_of(first_fw):
import torch.backends.cudnn.rnn as rnn
# Note: no_grad() is necessary since _cudnn_rnn_flatten_weight is
# an inplace operation on self._flat_weights
with torch.no_grad():
if torch._use_cudnn_rnn_flatten_weight():
num_weights = 4 if self.bias else 2
if self.proj_size > 0:
num_weights += 1
torch._cudnn_rnn_flatten_weight(
self._flat_weights, num_weights,
self.input_size, rnn.get_cudnn_mode(self.mode),
self.hidden_size, self.proj_size, self.num_layers,
self.batch_first, bool(self.bidirectional))
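# Illustrative note (sketch, not part of the original module): cuDNN warns when the
# RNN weights are no longer a single contiguous chunk of memory (for example inside
# DataParallel replicas); calling flatten_parameters() compacts them again.
#
#   rnn = LSTM(10, 20, num_layers=2).cuda()
#   rnn.flatten_parameters()                 # no-op on CPU or when cuDNN is unavailable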
def _apply(self, fn):
ret = super(RNNBase, self)._apply(fn)
# Resets _flat_weights
# Note: be very careful before removing this, as 3rd-party device types
# likely rely on this behavior to properly .to() modules like LSTM.
self._flat_weights = [(lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn) for wn in self._flat_weights_names]
# Flattens params (on CUDA)
self.flatten_parameters()
return ret
def reset_parameters(self) -> None:
stdv = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0
for weight in self.parameters():
init.uniform_(weight, -stdv, stdv)
def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
expected_input_dim = 2 if batch_sizes is not None else 3
if input.dim() != expected_input_dim:
raise RuntimeError(
'input must have {} dimensions, got {}'.format(
expected_input_dim, input.dim()))
if self.input_size != input.size(-1):
raise RuntimeError(
'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
self.input_size, input.size(-1)))
def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
if batch_sizes is not None:
mini_batch = int(batch_sizes[0])
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
if self.proj_size > 0:
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.proj_size)
else:
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.hidden_size)
return expected_hidden_size
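# Illustrative sketch: for a 2-layer bidirectional RNN with hidden_size=20, fed a
# (seq=5, batch=3, input=10) tensor with batch_first=False, the expected hidden
# state shape is (num_layers * num_directions, batch, hidden_size) = (4, 3, 20).
#
#   rnn = RNN(10, 20, num_layers=2, bidirectional=True)
#   x = torch.randn(5, 3, 10)
#   rnn.get_expected_hidden_size(x, None)    # (4, 3, 20)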
def check_hidden_size(self, hx: Tensor, expected_hidden_size: Tuple[int, int, int],
msg: str = 'Expected hidden size {}, got {}') -> None:
if hx.size() != expected_hidden_size:
raise RuntimeError(msg.format(expected_hidden_size, list(hx.size())))
def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]):
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
self.check_hidden_size(hidden, expected_hidden_size)
def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]):
if permutation is None:
return hx
return _apply_permutation(hx, permutation)
def extra_repr(self) -> str:
s = '{input_size}, {hidden_size}'
if self.proj_size != 0:
s += ', proj_size={proj_size}'
if self.num_layers != 1:
s += ', num_layers={num_layers}'
if self.bias is not True:
s += ', bias={bias}'
if self.batch_first is not False:
s += ', batch_first={batch_first}'
if self.dropout != 0:
s += ', dropout={dropout}'
if self.bidirectional is not False:
s += ', bidirectional={bidirectional}'
return s.format(**self.__dict__)
def __setstate__(self, d):
super(RNNBase, self).__setstate__(d)
if 'all_weights' in d:
self._all_weights = d['all_weights']
# In PyTorch 1.8 we added a proj_size member variable to LSTM.
# LSTMs that were serialized via torch.save(module) before PyTorch 1.8
# don't have it, so to preserve compatibility we set proj_size here.
if 'proj_size' not in d:
self.proj_size = 0
if isinstance(self._all_weights[0][0], str):
return
num_layers = self.num_layers
num_directions = 2 if self.bidirectional else 1
self._flat_weights_names = []
self._all_weights = []
for layer in range(num_layers):
for direction in range(num_directions):
suffix = '_reverse' if direction == 1 else ''
weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}',
'bias_hh_l{}{}', 'weight_hr_l{}{}']
weights = [x.format(layer, suffix) for x in weights]
if self.bias:
if self.proj_size > 0:
self._all_weights += [weights]
self._flat_weights_names.extend(weights)
else:
self._all_weights += [weights[:4]]
self._flat_weights_names.extend(weights[:4])
else:
if self.proj_size > 0:
self._all_weights += [weights[:2]] + [weights[-1:]]
self._flat_weights_names.extend(weights[:2] + [weights[-1:]])
else:
self._all_weights += [weights[:2]]
self._flat_weights_names.extend(weights[:2])
self._flat_weights = [(lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn) for wn in self._flat_weights_names]
@property
def all_weights(self) -> List[List[Parameter]]:
return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
def _replicate_for_data_parallel(self):
replica = super(RNNBase, self)._replicate_for_data_parallel()
# Need to copy these caches, otherwise the replica will share the same
# flat weights list.
replica._flat_weights = replica._flat_weights[:]
replica._flat_weights_names = replica._flat_weights_names[:]
return replica
class RNN(RNNBase):
r"""Applies a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}` non-linearity to an
input sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
h_t = \tanh(x_t W_{ih}^T + b_{ih} + h_{t-1}W_{hh}^T + b_{hh})
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
previous layer at time `t-1` or the initial hidden state at time `0`.
If :attr:`nonlinearity` is ``'relu'``, then :math:`\text{ReLU}` is used instead of :math:`\tanh`.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two RNNs together to form a `stacked RNN`,
with the second RNN taking in outputs of the first RNN and
computing the final results. Default: 1
nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
Note that this does not apply to hidden or cell states. See the
Inputs/Outputs sections below for details. Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
RNN layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional RNN. Default: ``False``
Inputs: input, h_0
* **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
:math:`(L, N, H_{in})` when ``batch_first=False`` or
:math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
the input sequence. The input can also be a packed variable length sequence.
See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
:func:`torch.nn.utils.rnn.pack_sequence` for details.
* **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{out})` containing the initial hidden
state for the input sequence batch. Defaults to zeros if not provided.
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input\_size} \\
H_{out} ={} & \text{hidden\_size}
\end{aligned}
Outputs: output, h_n
* **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
:math:`(L, N, D * H_{out})` when ``batch_first=False`` or
:math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
`(h_t)` from the last layer of the RNN, for each `t`. If a
:class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
will also be a packed sequence.
* **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state
for each element in the batch.
Attributes:
weight_ih_l[k]: the learnable input-hidden weights of the k-th layer,
of shape `(hidden_size, input_size)` for `k = 0`. Otherwise, the shape is
`(hidden_size, num_directions * hidden_size)`
weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer,
of shape `(hidden_size, hidden_size)`
bias_ih_l[k]: the learnable input-hidden bias of the k-th layer,
of shape `(hidden_size)`
bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer,
of shape `(hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. note::
For bidirectional RNNs, forward and backward are directions 0 and 1 respectively.
Example of splitting the output layers when ``batch_first=False``:
``output.view(seq_len, batch, num_directions, hidden_size)``.
.. note::
``batch_first`` argument is ignored for unbatched inputs.
.. include:: ../cudnn_rnn_determinism.rst
.. include:: ../cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.RNN(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> output, hn = rnn(input, h0)
"""
def __init__(self, *args, **kwargs):
if 'proj_size' in kwargs:
raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU")
self.nonlinearity = kwargs.pop('nonlinearity', 'tanh')
if self.nonlinearity == 'tanh':
mode = 'RNN_TANH'
elif self.nonlinearity == 'relu':
mode = 'RNN_RELU'
else:
raise ValueError("Unknown nonlinearity '{}'".format(self.nonlinearity))
super(RNN, self).__init__(mode, *args, **kwargs)
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
pass
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(self, input: PackedSequence, hx: Optional[Tensor] = None) -> Tuple[PackedSequence, Tensor]:
pass
def forward(self, input, hx=None): # noqa: F811
orig_input = input
if isinstance(orig_input, PackedSequence):
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = int(batch_sizes[0])
else:
batch_sizes = None
is_batched = input.dim() == 3
batch_dim = 0 if self.batch_first else 1
if not is_batched:
input = input.unsqueeze(batch_dim)
if hx is not None:
if hx.dim() != 2:
raise RuntimeError(
f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor")
hx = hx.unsqueeze(1)
else:
if hx is not None and hx.dim() != 3:
raise RuntimeError(
f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor")
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
num_directions = 2 if self.bidirectional else 1
hx = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes they are passing in.
hx = self.permute_hidden(hx, sorted_indices)
assert hx is not None
self.check_forward_args(input, hx, batch_sizes)
assert self.mode == 'RNN_TANH' or self.mode == 'RNN_RELU'
if batch_sizes is None:
if self.mode == 'RNN_TANH':
result = _VF.rnn_tanh(input, hx, self._flat_weights, self.bias, self.num_layers,
self.dropout, self.training, self.bidirectional,
self.batch_first)
else:
result = _VF.rnn_relu(input, hx, self._flat_weights, self.bias, self.num_layers,
self.dropout, self.training, self.bidirectional,
self.batch_first)
else:
if self.mode == 'RNN_TANH':
result = _VF.rnn_tanh(input, batch_sizes, hx, self._flat_weights, self.bias,
self.num_layers, self.dropout, self.training,
self.bidirectional)
else:
result = _VF.rnn_relu(input, batch_sizes, hx, self._flat_weights, self.bias,
self.num_layers, self.dropout, self.training,
self.bidirectional)
output = result[0]
hidden = result[1]
if isinstance(orig_input, PackedSequence):
output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output_packed, self.permute_hidden(hidden, unsorted_indices)
if not is_batched:
output = output.squeeze(batch_dim)
hidden = hidden.squeeze(1)
return output, self.permute_hidden(hidden, unsorted_indices)
# XXX: The LSTM and GRU implementations differ from RNNBase. This is because:
# 1. we want to support nn.LSTM and nn.GRU in TorchScript, and TorchScript in
# its current state could not support the python Union Type or Any Type
# 2. TorchScript static typing does not allow a Function or Callable type in
# Dict values, so we have to call _VF separately instead of using _rnn_impls
# 3. this is only a temporary, transitional state so that we could make it
# in time for the release
#
# More discussion details in https://github.com/pytorch/pytorch/pull/23266
#
# TODO: remove the overriding implementations for LSTM and GRU when TorchScript
# supports expressing these two modules generally.
class LSTM(RNNBase):
r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input
sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
\begin{array}{ll} \\
i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi}) \\
f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf}) \\
g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg}) \\
o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho}) \\
c_t = f_t \odot c_{t-1} + i_t \odot g_t \\
h_t = o_t \odot \tanh(c_t) \\
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell
state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{t-1}`
is the hidden state of the layer at time `t-1` or the initial hidden
state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`,
:math:`o_t` are the input, forget, cell, and output gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
In a multilayer LSTM, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
(:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
variable which is :math:`0` with probability :attr:`dropout`.
If ``proj_size > 0`` is specified, LSTM with projections will be used. This changes
the LSTM cell in the following way. First, the dimension of :math:`h_t` will be changed from
``hidden_size`` to ``proj_size`` (dimensions of :math:`W_{hi}` will be changed accordingly).
Second, the output hidden state of each layer will be multiplied by a learnable projection
matrix: :math:`h_t = W_{hr}h_t`. Note that as a consequence of this, the output
of LSTM network will be of different shape as well. See Inputs/Outputs sections below for exact
dimensions of all variables. You can find more details in https://arxiv.org/abs/1402.1128.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two LSTMs together to form a `stacked LSTM`,
with the second LSTM taking in outputs of the first LSTM and
computing the final results. Default: 1
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
Note that this does not apply to hidden or cell states. See the
Inputs/Outputs sections below for details. Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
LSTM layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional LSTM. Default: ``False``
proj_size: If ``> 0``, will use LSTM with projections of corresponding size. Default: 0
Inputs: input, (h_0, c_0)
* **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
:math:`(L, N, H_{in})` when ``batch_first=False`` or
:math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
the input sequence. The input can also be a packed variable length sequence.
See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
:func:`torch.nn.utils.rnn.pack_sequence` for details.
* **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{out})` containing the
initial hidden state for each element in the input sequence.
Defaults to zeros if (h_0, c_0) is not provided.
* **c_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{cell})` containing the
initial cell state for each element in the input sequence.
Defaults to zeros if (h_0, c_0) is not provided.
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input\_size} \\
H_{cell} ={} & \text{hidden\_size} \\
H_{out} ={} & \text{proj\_size if } \text{proj\_size}>0 \text{ otherwise hidden\_size} \\
\end{aligned}
Outputs: output, (h_n, c_n)
* **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
:math:`(L, N, D * H_{out})` when ``batch_first=False`` or
:math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
`(h_t)` from the last layer of the LSTM, for each `t`. If a
:class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
will also be a packed sequence. When ``bidirectional=True``, `output` will contain
a concatenation of the forward and reverse hidden states at each time step in the sequence.
* **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{out})` containing the
final hidden state for each element in the sequence. When ``bidirectional=True``,
`h_n` will contain a concatenation of the final forward and reverse hidden states, respectively.
* **c_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{cell})` containing the
final cell state for each element in the sequence. When ``bidirectional=True``,
`c_n` will contain a concatenation of the final forward and reverse cell states, respectively.
Attributes:
weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
`(W_ii|W_if|W_ig|W_io)`, of shape `(4*hidden_size, input_size)` for `k = 0`.
Otherwise, the shape is `(4*hidden_size, num_directions * hidden_size)`. If
``proj_size > 0`` was specified, the shape will be
`(4*hidden_size, num_directions * proj_size)` for `k > 0`
weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
`(W_hi|W_hf|W_hg|W_ho)`, of shape `(4*hidden_size, hidden_size)`. If ``proj_size > 0``
was specified, the shape will be `(4*hidden_size, proj_size)`.
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
`(b_ii|b_if|b_ig|b_io)`, of shape `(4*hidden_size)`
bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
`(b_hi|b_hf|b_hg|b_ho)`, of shape `(4*hidden_size)`
weight_hr_l[k] : the learnable projection weights of the :math:`\text{k}^{th}` layer
of shape `(proj_size, hidden_size)`. Only present when ``proj_size > 0`` was
specified.
weight_ih_l[k]_reverse: Analogous to `weight_ih_l[k]` for the reverse direction.
Only present when ``bidirectional=True``.
weight_hh_l[k]_reverse: Analogous to `weight_hh_l[k]` for the reverse direction.
Only present when ``bidirectional=True``.
bias_ih_l[k]_reverse: Analogous to `bias_ih_l[k]` for the reverse direction.
Only present when ``bidirectional=True``.
bias_hh_l[k]_reverse: Analogous to `bias_hh_l[k]` for the reverse direction.
Only present when ``bidirectional=True``.
weight_hr_l[k]_reverse: Analogous to `weight_hr_l[k]` for the reverse direction.
Only present when ``bidirectional=True`` and ``proj_size > 0`` was specified.
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. note::
For bidirectional LSTMs, forward and backward are directions 0 and 1 respectively.
Example of splitting the output layers when ``batch_first=False``:
``output.view(seq_len, batch, num_directions, hidden_size)``.
.. note::
For bidirectional LSTMs, `h_n` is not equivalent to the last element of `output`; the
former contains the final forward and reverse hidden states, while the latter contains the
final forward hidden state and the initial reverse hidden state.
.. note::
``batch_first`` argument is ignored for unbatched inputs.
.. include:: ../cudnn_rnn_determinism.rst
.. include:: ../cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.LSTM(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> c0 = torch.randn(2, 3, 20)
>>> output, (hn, cn) = rnn(input, (h0, c0))
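        >>> # illustrative sketch (not part of the original example; shapes follow the
        >>> # tables above): with projections, h_n and the output features use proj_size
        >>> rnn_proj = nn.LSTM(10, 20, 2, proj_size=5)
        >>> h0_proj = torch.randn(2, 3, 5)
        >>> c0_proj = torch.randn(2, 3, 20)
        >>> output_proj, (hn_proj, cn_proj) = rnn_proj(input, (h0_proj, c0_proj))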
"""
def __init__(self, *args, **kwargs):
super(LSTM, self).__init__('LSTM', *args, **kwargs)
def get_expected_cell_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
if batch_sizes is not None:
mini_batch = int(batch_sizes[0])
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.hidden_size)
return expected_hidden_size
# In the future, we should prevent mypy from applying contravariance rules here.
# See torch/nn/modules/module.py::_forward_unimplemented
def check_forward_args(self, # type: ignore[override]
input: Tensor,
hidden: Tuple[Tensor, Tensor],
batch_sizes: Optional[Tensor],
):
self.check_input(input, batch_sizes)
self.check_hidden_size(hidden[0], self.get_expected_hidden_size(input, batch_sizes),
'Expected hidden[0] size {}, got {}')
self.check_hidden_size(hidden[1], self.get_expected_cell_size(input, batch_sizes),
'Expected hidden[1] size {}, got {}')
# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
def permute_hidden(self, # type: ignore[override]
hx: Tuple[Tensor, Tensor],
permutation: Optional[Tensor]
) -> Tuple[Tensor, Tensor]:
if permutation is None:
return hx
return _apply_permutation(hx[0], permutation), _apply_permutation(hx[1], permutation)
# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
@overload # type: ignore[override]
@torch._jit_internal._overload_method # noqa: F811
def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: # noqa: F811
pass
# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]: # noqa: F811
pass
def forward(self, input, hx=None): # noqa: F811
orig_input = input
# xxx: isinstance check needs to be in conditional for TorchScript to compile
batch_sizes = None
if isinstance(orig_input, PackedSequence):
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
else:
batch_sizes = None
is_batched = input.dim() == 3
batch_dim = 0 if self.batch_first else 1
if not is_batched:
input = input.unsqueeze(batch_dim)
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
num_directions = 2 if self.bidirectional else 1
real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size
h_zeros = torch.zeros(self.num_layers * num_directions,
max_batch_size, real_hidden_size,
dtype=input.dtype, device=input.device)
c_zeros = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
hx = (h_zeros, c_zeros)
else:
if batch_sizes is None: # If not PackedSequence input.
if is_batched:
if (hx[0].dim() != 3 or hx[1].dim() != 3):
msg = ("For batched 3-D input, hx and cx should "
f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors")
raise RuntimeError(msg)
else:
if hx[0].dim() != 2 or hx[1].dim() != 2:
msg = ("For unbatched 2-D input, hx and cx should "
f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors")
raise RuntimeError(msg)
hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
# Each batch of the hidden state should match the input sequence that
            # the user believes they are passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
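        # _VF.lstm has two overloads: the padded-tensor variant below takes batch_first,
        # while the packed-sequence variant takes batch_sizes instead.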
if batch_sizes is None:
result = _VF.lstm(input, hx, self._flat_weights, self.bias, self.num_layers,
self.dropout, self.training, self.bidirectional, self.batch_first)
else:
result = _VF.lstm(input, batch_sizes, hx, self._flat_weights, self.bias,
self.num_layers, self.dropout, self.training, self.bidirectional)
output = result[0]
hidden = result[1:]
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output_packed, self.permute_hidden(hidden, unsorted_indices)
else:
if not is_batched:
output = output.squeeze(batch_dim)
hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1))
return output, self.permute_hidden(hidden, unsorted_indices)
class GRU(RNNBase):
r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
\begin{array}{ll}
r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\
h_t = (1 - z_t) * n_t + z_t * h_{(t-1)}
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input
at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer
at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`,
:math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
    In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l`-th layer
(:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
variable which is :math:`0` with probability :attr:`dropout`.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two GRUs together to form a `stacked GRU`,
with the second GRU taking in outputs of the first GRU and
computing the final results. Default: 1
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
Note that this does not apply to hidden or cell states. See the
Inputs/Outputs sections below for details. Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
GRU layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False``
Inputs: input, h_0
* **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
:math:`(L, N, H_{in})` when ``batch_first=False`` or
:math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
the input sequence. The input can also be a packed variable length sequence.
See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
:func:`torch.nn.utils.rnn.pack_sequence` for details.
* **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` or
:math:`(D * \text{num\_layers}, N, H_{out})`
containing the initial hidden state for the input sequence. Defaults to zeros if not provided.
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input\_size} \\
H_{out} ={} & \text{hidden\_size}
\end{aligned}
Outputs: output, h_n
* **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
:math:`(L, N, D * H_{out})` when ``batch_first=False`` or
:math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
`(h_t)` from the last layer of the GRU, for each `t`. If a
:class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
will also be a packed sequence.
* **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` or
:math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state
for the input sequence.
Attributes:
weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
(W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`.
Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)`
weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
(W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)`
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
(b_ir|b_iz|b_in), of shape `(3*hidden_size)`
bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
(b_hr|b_hz|b_hn), of shape `(3*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. note::
For bidirectional GRUs, forward and backward are directions 0 and 1 respectively.
Example of splitting the output layers when ``batch_first=False``:
``output.view(seq_len, batch, num_directions, hidden_size)``.
.. note::
``batch_first`` argument is ignored for unbatched inputs.
.. include:: ../cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.GRU(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> output, hn = rnn(input, h0)
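        >>> # illustrative sketch (lengths here are assumed): variable-length sequences
        >>> # can be packed first, and the output is then a PackedSequence as well
        >>> from torch.nn.utils.rnn import pack_padded_sequence
        >>> packed = pack_padded_sequence(torch.randn(5, 3, 10), lengths=[5, 3, 2])
        >>> packed_output, hn = rnn(packed)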
"""
def __init__(self, *args, **kwargs):
if 'proj_size' in kwargs:
raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU")
super(GRU, self).__init__('GRU', *args, **kwargs)
@overload # type: ignore[override]
@torch._jit_internal._overload_method # noqa: F811
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: # noqa: F811
pass
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(self, input: PackedSequence, hx: Optional[Tensor] = None) -> Tuple[PackedSequence, Tensor]: # noqa: F811
pass
def forward(self, input, hx=None): # noqa: F811
orig_input = input
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
else:
batch_sizes = None
is_batched = input.dim() == 3
batch_dim = 0 if self.batch_first else 1
if not is_batched:
input = input.unsqueeze(batch_dim)
if hx is not None:
if hx.dim() != 2:
raise RuntimeError(
f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor")
hx = hx.unsqueeze(1)
else:
if hx is not None and hx.dim() != 3:
raise RuntimeError(
f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor")
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
num_directions = 2 if self.bidirectional else 1
hx = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
else:
# Each batch of the hidden state should match the input sequence that
            # the user believes they are passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
if batch_sizes is None:
result = _VF.gru(input, hx, self._flat_weights, self.bias, self.num_layers,
self.dropout, self.training, self.bidirectional, self.batch_first)
else:
result = _VF.gru(input, batch_sizes, hx, self._flat_weights, self.bias,
self.num_layers, self.dropout, self.training, self.bidirectional)
output = result[0]
hidden = result[1]
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output_packed, self.permute_hidden(hidden, unsorted_indices)
else:
if not is_batched:
output = output.squeeze(batch_dim)
hidden = hidden.squeeze(1)
return output, self.permute_hidden(hidden, unsorted_indices)
class RNNCellBase(Module):
__constants__ = ['input_size', 'hidden_size', 'bias']
input_size: int
hidden_size: int
bias: bool
weight_ih: Tensor
weight_hh: Tensor
# WARNING: bias_ih and bias_hh purposely not defined here.
# See https://github.com/pytorch/pytorch/issues/39670
def __init__(self, input_size: int, hidden_size: int, bias: bool, num_chunks: int,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(RNNCellBase, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
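        # num_chunks stacks the per-gate weight matrices into a single parameter:
        # 1 for RNNCell, 3 for GRUCell (r, z, n) and 4 for LSTMCell (i, f, g, o).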
self.weight_ih = Parameter(torch.empty((num_chunks * hidden_size, input_size), **factory_kwargs))
self.weight_hh = Parameter(torch.empty((num_chunks * hidden_size, hidden_size), **factory_kwargs))
if bias:
self.bias_ih = Parameter(torch.empty(num_chunks * hidden_size, **factory_kwargs))
self.bias_hh = Parameter(torch.empty(num_chunks * hidden_size, **factory_kwargs))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
self.reset_parameters()
def extra_repr(self) -> str:
s = '{input_size}, {hidden_size}'
if 'bias' in self.__dict__ and self.bias is not True:
s += ', bias={bias}'
if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
s += ', nonlinearity={nonlinearity}'
return s.format(**self.__dict__)
def reset_parameters(self) -> None:
stdv = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0
for weight in self.parameters():
init.uniform_(weight, -stdv, stdv)
class RNNCell(RNNCellBase):
r"""An Elman RNN cell with tanh or ReLU non-linearity.
.. math::
h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh})
If :attr:`nonlinearity` is `'relu'`, then ReLU is used in place of tanh.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
Inputs: input, hidden
- **input**: tensor containing input features
- **hidden**: tensor containing the initial hidden state
Defaults to zero if not provided.
Outputs: h'
- **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state
for each element in the batch
Shape:
- input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where
:math:`H_{in}` = `input_size`.
- hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden
state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided.
- output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state.
Attributes:
weight_ih: the learnable input-hidden weights, of shape
`(hidden_size, input_size)`
weight_hh: the learnable hidden-hidden weights, of shape
`(hidden_size, hidden_size)`
bias_ih: the learnable input-hidden bias, of shape `(hidden_size)`
bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
Examples::
>>> rnn = nn.RNNCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
... hx = rnn(input[i], hx)
... output.append(hx)
"""
__constants__ = ['input_size', 'hidden_size', 'bias', 'nonlinearity']
nonlinearity: str
def __init__(self, input_size: int, hidden_size: int, bias: bool = True, nonlinearity: str = "tanh",
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(RNNCell, self).__init__(input_size, hidden_size, bias, num_chunks=1, **factory_kwargs)
self.nonlinearity = nonlinearity
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
assert input.dim() in (1, 2), \
f"RNNCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
is_batched = input.dim() == 2
if not is_batched:
input = input.unsqueeze(0)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
else:
hx = hx.unsqueeze(0) if not is_batched else hx
if self.nonlinearity == "tanh":
ret = _VF.rnn_tanh_cell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
elif self.nonlinearity == "relu":
ret = _VF.rnn_relu_cell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
else:
ret = input # TODO: remove when jit supports exception flow
raise RuntimeError(
"Unknown nonlinearity: {}".format(self.nonlinearity))
if not is_batched:
ret = ret.squeeze(0)
return ret
class LSTMCell(RNNCellBase):
r"""A long short-term memory (LSTM) cell.
.. math::
\begin{array}{ll}
i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\
f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\
g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\
o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\
c' = f * c + i * g \\
h' = o * \tanh(c') \\
\end{array}
where :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
bias: If ``False``, then the layer does not use bias weights `b_ih` and
`b_hh`. Default: ``True``
Inputs: input, (h_0, c_0)
- **input** of shape `(batch, input_size)` or `(input_size)`: tensor containing input features
- **h_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial hidden state
- **c_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial cell state
If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
Outputs: (h_1, c_1)
- **h_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next hidden state
- **c_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next cell state
Attributes:
weight_ih: the learnable input-hidden weights, of shape
`(4*hidden_size, input_size)`
weight_hh: the learnable hidden-hidden weights, of shape
`(4*hidden_size, hidden_size)`
bias_ih: the learnable input-hidden bias, of shape `(4*hidden_size)`
bias_hh: the learnable hidden-hidden bias, of shape `(4*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Examples::
>>> rnn = nn.LSTMCell(10, 20) # (input_size, hidden_size)
>>> input = torch.randn(2, 3, 10) # (time_steps, batch, input_size)
>>> hx = torch.randn(3, 20) # (batch, hidden_size)
>>> cx = torch.randn(3, 20)
>>> output = []
>>> for i in range(input.size()[0]):
... hx, cx = rnn(input[i], (hx, cx))
... output.append(hx)
>>> output = torch.stack(output, dim=0)
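        >>> # illustrative sketch: unbatched (1-D) inputs are also supported, in which
        >>> # case the returned hidden and cell states are 1-D as well
        >>> hx1, cx1 = rnn(torch.randn(10), (torch.randn(20), torch.randn(20)))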
"""
def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(LSTMCell, self).__init__(input_size, hidden_size, bias, num_chunks=4, **factory_kwargs)
def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
assert input.dim() in (1, 2), \
f"LSTMCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
is_batched = input.dim() == 2
if not is_batched:
input = input.unsqueeze(0)
if hx is None:
zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
hx = (zeros, zeros)
else:
hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx
ret = _VF.lstm_cell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
if not is_batched:
ret = (ret[0].squeeze(0), ret[1].squeeze(0))
return ret
class GRUCell(RNNCellBase):
r"""A gated recurrent unit (GRU) cell
.. math::
\begin{array}{ll}
r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\
z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\
n = \tanh(W_{in} x + b_{in} + r * (W_{hn} h + b_{hn})) \\
h' = (1 - z) * n + z * h
\end{array}
where :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
bias: If ``False``, then the layer does not use bias weights `b_ih` and
`b_hh`. Default: ``True``
Inputs: input, hidden
        - **input**: tensor containing input features
        - **hidden**: tensor containing the initial hidden
state for each element in the batch.
Defaults to zero if not provided.
Outputs: h'
        - **h'**: tensor containing the next hidden state
for each element in the batch
Shape:
- input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where
:math:`H_{in}` = `input_size`.
- hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden
state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided.
- output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state.
Attributes:
weight_ih: the learnable input-hidden weights, of shape
`(3*hidden_size, input_size)`
weight_hh: the learnable hidden-hidden weights, of shape
`(3*hidden_size, hidden_size)`
bias_ih: the learnable input-hidden bias, of shape `(3*hidden_size)`
bias_hh: the learnable hidden-hidden bias, of shape `(3*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
Examples::
>>> rnn = nn.GRUCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
... hx = rnn(input[i], hx)
... output.append(hx)
"""
def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(GRUCell, self).__init__(input_size, hidden_size, bias, num_chunks=3, **factory_kwargs)
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
assert input.dim() in (1, 2), \
f"GRUCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
is_batched = input.dim() == 2
if not is_batched:
input = input.unsqueeze(0)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
else:
hx = hx.unsqueeze(0) if not is_batched else hx
ret = _VF.gru_cell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh,
)
if not is_batched:
ret = ret.squeeze(0)
return ret
| pytorch-master | torch/nn/modules/rnn.py |
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F
from torch import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
from typing import Sequence, Tuple
# TODO: grad_output size asserts in THNN
__all__ = ['ConstantPad1d', 'ConstantPad2d', 'ConstantPad3d', 'ReflectionPad1d', 'ReflectionPad2d',
'ReflectionPad3d', 'ReplicationPad1d', 'ReplicationPad2d', 'ReplicationPad3d', 'ZeroPad2d']
class _ConstantPadNd(Module):
__constants__ = ['padding', 'value']
value: float
padding: Sequence[int]
def __init__(self, value: float) -> None:
super(_ConstantPadNd, self).__init__()
self.value = value
def forward(self, input: Tensor) -> Tensor:
return F.pad(input, self.padding, 'constant', self.value)
def extra_repr(self) -> str:
return 'padding={}, value={}'.format(self.padding, self.value)
class ConstantPad1d(_ConstantPadNd):
r"""Pads the input tensor boundaries with a constant value.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in both boundaries. If a 2-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
Shape:
- Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
- Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = nn.ConstantPad1d(2, 3.5)
>>> input = torch.randn(1, 2, 4)
>>> input
tensor([[[-1.0491, -0.7152, -0.0749, 0.8530],
[-1.3287, 1.8966, 0.1466, -0.2771]]])
>>> m(input)
tensor([[[ 3.5000, 3.5000, -1.0491, -0.7152, -0.0749, 0.8530, 3.5000,
3.5000],
[ 3.5000, 3.5000, -1.3287, 1.8966, 0.1466, -0.2771, 3.5000,
3.5000]]])
>>> m = nn.ConstantPad1d(2, 3.5)
>>> input = torch.randn(1, 2, 3)
>>> input
tensor([[[ 1.6616, 1.4523, -1.1255],
[-3.6372, 0.1182, -1.8652]]])
>>> m(input)
tensor([[[ 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000, 3.5000],
[ 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000, 3.5000]]])
>>> # using different paddings for different sides
>>> m = nn.ConstantPad1d((3, 1), 3.5)
>>> m(input)
tensor([[[ 3.5000, 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000],
[ 3.5000, 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000]]])
"""
padding: Tuple[int, int]
def __init__(self, padding: _size_2_t, value: float):
super(ConstantPad1d, self).__init__(value)
self.padding = _pair(padding)
class ConstantPad2d(_ConstantPadNd):
r"""Pads the input tensor boundaries with a constant value.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = nn.ConstantPad2d(2, 3.5)
>>> input = torch.randn(1, 2, 2)
>>> input
tensor([[[ 1.6585, 0.4320],
[-0.8701, -0.4649]]])
>>> m(input)
tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 1.6585, 0.4320, 3.5000, 3.5000],
[ 3.5000, 3.5000, -0.8701, -0.4649, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
>>> # using different paddings for different sides
>>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
>>> m(input)
tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320],
[ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
"""
__constants__ = ['padding', 'value']
padding: Tuple[int, int, int, int]
def __init__(self, padding: _size_4_t, value: float) -> None:
super(ConstantPad2d, self).__init__(value)
self.padding = _quadruple(padding)
class ConstantPad3d(_ConstantPadNd):
r"""Pads the input tensor boundaries with a constant value.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 6-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
:math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
:math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
:math:`(C, D_{out}, H_{out}, W_{out})`, where
:math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ConstantPad3d(3, 3.5)
>>> input = torch.randn(16, 3, 10, 20, 30)
>>> output = m(input)
>>> # using different paddings for different sides
>>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
>>> output = m(input)
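        >>> # illustrative shape check, following the Shape section above:
        >>> # D: 10 + 0 + 1 = 11, H: 20 + 6 + 6 = 32, W: 30 + 3 + 3 = 36
        >>> output.size()
        torch.Size([16, 3, 11, 32, 36])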
"""
padding: Tuple[int, int, int, int, int, int]
def __init__(self, padding: _size_6_t, value: float) -> None:
super(ConstantPad3d, self).__init__(value)
self.padding = _ntuple(6)(padding)
class _ReflectionPadNd(Module):
__constants__ = ['padding']
padding: Sequence[int]
def forward(self, input: Tensor) -> Tensor:
return F.pad(input, self.padding, 'reflect')
def extra_repr(self) -> str:
return '{}'.format(self.padding)
class ReflectionPad1d(_ReflectionPadNd):
r"""Pads the input tensor using the reflection of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 2-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
Shape:
- Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
- Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ReflectionPad1d(2)
>>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
>>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
>>> input
tensor([[[0., 1., 2., 3.],
[4., 5., 6., 7.]]])
>>> m(input)
tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
[6., 5., 4., 5., 6., 7., 6., 5.]]])
>>> # using different paddings for different sides
>>> m = nn.ReflectionPad1d((3, 1))
>>> m(input)
tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
[7., 6., 5., 4., 5., 6., 7., 6.]]])
"""
padding: Tuple[int, int]
def __init__(self, padding: _size_2_t) -> None:
super(ReflectionPad1d, self).__init__()
self.padding = _pair(padding)
class ReflectionPad2d(_ReflectionPadNd):
r"""Pads the input tensor using the reflection of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})` where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
>>> m = nn.ReflectionPad2d(2)
>>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
>>> input
tensor([[[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]]])
>>> m(input)
tensor([[[[8., 7., 6., 7., 8., 7., 6.],
[5., 4., 3., 4., 5., 4., 3.],
[2., 1., 0., 1., 2., 1., 0.],
[5., 4., 3., 4., 5., 4., 3.],
[8., 7., 6., 7., 8., 7., 6.],
[5., 4., 3., 4., 5., 4., 3.],
[2., 1., 0., 1., 2., 1., 0.]]]])
>>> # using different paddings for different sides
>>> m = nn.ReflectionPad2d((1, 1, 2, 0))
>>> m(input)
tensor([[[[7., 6., 7., 8., 7.],
[4., 3., 4., 5., 4.],
[1., 0., 1., 2., 1.],
[4., 3., 4., 5., 4.],
[7., 6., 7., 8., 7.]]]])
"""
padding: Tuple[int, int, int, int]
def __init__(self, padding: _size_4_t) -> None:
super(ReflectionPad2d, self).__init__()
self.padding = _quadruple(padding)
class ReflectionPad3d(_ReflectionPadNd):
r"""Pads the input tensor using the reflection of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 6-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
:math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
:math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
where
:math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
>>> m = nn.ReflectionPad3d(1)
>>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2)
>>> m(input)
tensor([[[[[7., 6., 7., 6.],
[5., 4., 5., 4.],
[7., 6., 7., 6.],
[5., 4., 5., 4.]],
[[3., 2., 3., 2.],
[1., 0., 1., 0.],
[3., 2., 3., 2.],
[1., 0., 1., 0.]],
[[7., 6., 7., 6.],
[5., 4., 5., 4.],
[7., 6., 7., 6.],
[5., 4., 5., 4.]],
[[3., 2., 3., 2.],
[1., 0., 1., 0.],
[3., 2., 3., 2.],
[1., 0., 1., 0.]]]]])
"""
padding: Tuple[int, int, int, int, int, int]
def __init__(self, padding: _size_6_t) -> None:
super(ReflectionPad3d, self).__init__()
self.padding = _ntuple(6)(padding)
class _ReplicationPadNd(Module):
__constants__ = ['padding']
padding: Sequence[int]
def forward(self, input: Tensor) -> Tensor:
return F.pad(input, self.padding, 'replicate')
def extra_repr(self) -> str:
return '{}'.format(self.padding)
class ReplicationPad1d(_ReplicationPadNd):
r"""Pads the input tensor using replication of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 2-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
Shape:
- Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
- Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
>>> m = nn.ReplicationPad1d(2)
>>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
>>> input
tensor([[[0., 1., 2., 3.],
[4., 5., 6., 7.]]])
>>> m(input)
tensor([[[0., 0., 0., 1., 2., 3., 3., 3.],
[4., 4., 4., 5., 6., 7., 7., 7.]]])
>>> # using different paddings for different sides
>>> m = nn.ReplicationPad1d((3, 1))
>>> m(input)
tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
[4., 4., 4., 4., 5., 6., 7., 7.]]])
"""
padding: Tuple[int, int]
def __init__(self, padding: _size_2_t) -> None:
super(ReplicationPad1d, self).__init__()
self.padding = _pair(padding)
class ReplicationPad2d(_ReplicationPadNd):
r"""Pads the input tensor using replication of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ReplicationPad2d(2)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
>>> input
tensor([[[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]]])
>>> m(input)
tensor([[[[0., 0., 0., 1., 2., 2., 2.],
[0., 0., 0., 1., 2., 2., 2.],
[0., 0., 0., 1., 2., 2., 2.],
[3., 3., 3., 4., 5., 5., 5.],
[6., 6., 6., 7., 8., 8., 8.],
[6., 6., 6., 7., 8., 8., 8.],
[6., 6., 6., 7., 8., 8., 8.]]]])
>>> # using different paddings for different sides
>>> m = nn.ReplicationPad2d((1, 1, 2, 0))
>>> m(input)
tensor([[[[0., 0., 1., 2., 2.],
[0., 0., 1., 2., 2.],
[0., 0., 1., 2., 2.],
[3., 3., 4., 5., 5.],
[6., 6., 7., 8., 8.]]]])
"""
padding: Tuple[int, int, int, int]
def __init__(self, padding: _size_4_t) -> None:
super(ReplicationPad2d, self).__init__()
self.padding = _quadruple(padding)
class ReplicationPad3d(_ReplicationPadNd):
r"""Pads the input tensor using replication of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 6-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
:math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
:math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
where
:math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = nn.ReplicationPad3d(3)
>>> input = torch.randn(16, 3, 8, 320, 480)
>>> output = m(input)
>>> # using different paddings for different sides
>>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
>>> output = m(input)
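        >>> # illustrative shape check, following the Shape section above:
        >>> # D: 8 + 1 + 1 = 10, H: 320 + 6 + 6 = 332, W: 480 + 3 + 3 = 486
        >>> output.size()
        torch.Size([16, 3, 10, 332, 486])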
"""
padding: Tuple[int, int, int, int, int, int]
def __init__(self, padding: _size_6_t) -> None:
super(ReplicationPad3d, self).__init__()
self.padding = _ntuple(6)(padding)
class ZeroPad2d(ConstantPad2d):
r"""Pads the input tensor boundaries with zero.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = nn.ZeroPad2d(2)
>>> input = torch.randn(1, 1, 3, 3)
>>> input
tensor([[[[-0.1678, -0.4418, 1.9466],
[ 0.9604, -0.4219, -0.5241],
[-0.9162, -0.5436, -0.6446]]]])
>>> m(input)
tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.1678, -0.4418, 1.9466, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.9604, -0.4219, -0.5241, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.9162, -0.5436, -0.6446, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
>>> # using different paddings for different sides
>>> m = nn.ZeroPad2d((1, 1, 2, 0))
>>> m(input)
tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, -0.1678, -0.4418, 1.9466, 0.0000],
[ 0.0000, 0.9604, -0.4219, -0.5241, 0.0000],
[ 0.0000, -0.9162, -0.5436, -0.6446, 0.0000]]]])
"""
padding: Tuple[int, int, int, int]
def __init__(self, padding: _size_4_t) -> None:
super(ZeroPad2d, self).__init__(padding, 0.)
def extra_repr(self) -> str:
return '{}'.format(self.padding)
| pytorch-master | torch/nn/modules/padding.py |
# -*- coding: utf-8 -*-
from .module import Module
from .. import functional as F
from torch import Tensor
from ..common_types import _size_any_t
__all__ = ['Fold', 'Unfold']
class Fold(Module):
r"""Combines an array of sliding local blocks into a large containing
tensor.
Consider a batched :attr:`input` tensor containing sliding local blocks,
e.g., patches of images, of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`,
where :math:`N` is batch dimension, :math:`C \times \prod(\text{kernel\_size})`
is the number of values within a block (a block has :math:`\prod(\text{kernel\_size})`
spatial locations each containing a :math:`C`-channeled vector), and
:math:`L` is the total number of blocks. (This is exactly the
same specification as the output shape of :class:`~torch.nn.Unfold`.) This
operation combines these local blocks into the large :attr:`output` tensor
of shape :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
by summing the overlapping values. Similar to :class:`~torch.nn.Unfold`, the
arguments must satisfy
.. math::
L = \prod_d \left\lfloor\frac{\text{output\_size}[d] + 2 \times \text{padding}[d] %
- \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
where :math:`d` is over all spatial dimensions.
* :attr:`output_size` describes the spatial shape of the large containing
tensor of the sliding local blocks. It is useful to resolve the ambiguity
      when multiple input shapes map to the same number of sliding blocks, e.g.,
with ``stride > 0``.
The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
how the sliding blocks are retrieved.
* :attr:`stride` controls the stride for the sliding blocks.
    * :attr:`padding` controls the amount of implicit zero padding added on
      both sides of each spatial dimension (:attr:`padding` points per side)
      before reshaping.
* :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Args:
output_size (int or tuple): the shape of the spatial dimensions of the
output (i.e., ``output.sizes()[2:]``)
kernel_size (int or tuple): the size of the sliding blocks
stride (int or tuple): the stride of the sliding blocks in the input
spatial dimensions. Default: 1
padding (int or tuple, optional): implicit zero padding to be added on
both sides of input. Default: 0
dilation (int or tuple, optional): a parameter that controls the
stride of elements within the
neighborhood. Default: 1
* If :attr:`output_size`, :attr:`kernel_size`, :attr:`dilation`,
:attr:`padding` or :attr:`stride` is an int or a tuple of length 1 then
their values will be replicated across all spatial dimensions.
* For the case of two output spatial dimensions this operation is sometimes
called ``col2im``.
.. note::
:class:`~torch.nn.Fold` calculates each combined value in the resulting
large tensor by summing all values from all containing blocks.
:class:`~torch.nn.Unfold` extracts the values in the local blocks by
copying from the large tensor. So, if the blocks overlap, they are not
inverses of each other.
In general, folding and unfolding operations are related as
follows. Consider :class:`~torch.nn.Fold` and
:class:`~torch.nn.Unfold` instances created with the same
parameters:
>>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
>>> fold = nn.Fold(output_size=..., **fold_params)
>>> unfold = nn.Unfold(**fold_params)
Then for any (supported) ``input`` tensor the following
equality holds:
::
fold(unfold(input)) == divisor * input
where ``divisor`` is a tensor that depends only on the shape
and dtype of the ``input``:
>>> # xdoctest: +SKIP
>>> input_ones = torch.ones(input.shape, dtype=input.dtype)
>>> divisor = fold(unfold(input_ones))
When the ``divisor`` tensor contains no zero elements, then
``fold`` and ``unfold`` operations are inverses of each
other (up to constant divisor).
.. warning::
Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported.
Shape:
- Input: :math:`(N, C \times \prod(\text{kernel\_size}), L)` or :math:`(C \times \prod(\text{kernel\_size}), L)`
- Output: :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
or :math:`(C, \text{output\_size}[0], \text{output\_size}[1], \dots)` as described above
Examples::
>>> fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2))
>>> input = torch.randn(1, 3 * 2 * 2, 12)
>>> output = fold(input)
>>> output.size()
torch.Size([1, 3, 4, 5])
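        >>> # illustrative check of the block-count formula above: with zero padding and
        >>> # unit stride/dilation, L = (4 - 2 + 1) * (5 - 2 + 1) = 12, i.e. input.size(2)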
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
__constants__ = ['output_size', 'kernel_size', 'dilation', 'padding',
'stride']
output_size: _size_any_t
kernel_size: _size_any_t
dilation: _size_any_t
padding: _size_any_t
stride: _size_any_t
def __init__(
self,
output_size: _size_any_t,
kernel_size: _size_any_t,
dilation: _size_any_t = 1,
padding: _size_any_t = 0,
stride: _size_any_t = 1
) -> None:
super(Fold, self).__init__()
self.output_size = output_size
self.kernel_size = kernel_size
self.dilation = dilation
self.padding = padding
self.stride = stride
def forward(self, input: Tensor) -> Tensor:
return F.fold(input, self.output_size, self.kernel_size, self.dilation,
self.padding, self.stride)
def extra_repr(self) -> str:
return 'output_size={output_size}, kernel_size={kernel_size}, ' \
'dilation={dilation}, padding={padding}, stride={stride}'.format(
**self.__dict__
)
class Unfold(Module):
r"""Extracts sliding local blocks from a batched input tensor.
Consider a batched :attr:`input` tensor of shape :math:`(N, C, *)`,
where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
and :math:`*` represent arbitrary spatial dimensions. This operation flattens
each sliding :attr:`kernel_size`-sized block within the spatial dimensions
of :attr:`input` into a column (i.e., last dimension) of a 3-D :attr:`output`
tensor of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, where
:math:`C \times \prod(\text{kernel\_size})` is the total number of values
within each block (a block has :math:`\prod(\text{kernel\_size})` spatial
locations each containing a :math:`C`-channeled vector), and :math:`L` is
the total number of such blocks:
.. math::
L = \prod_d \left\lfloor\frac{\text{spatial\_size}[d] + 2 \times \text{padding}[d] %
- \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
where :math:`\text{spatial\_size}` is formed by the spatial dimensions
of :attr:`input` (:math:`*` above), and :math:`d` is over all spatial
dimensions.
Therefore, indexing :attr:`output` at the last dimension (column dimension)
gives all values within a certain block.
The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
how the sliding blocks are retrieved.
* :attr:`stride` controls the stride for the sliding blocks.
    * :attr:`padding` controls the amount of implicit zero padding added on
      both sides of each spatial dimension (:attr:`padding` points per side)
      before reshaping.
* :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Args:
kernel_size (int or tuple): the size of the sliding blocks
stride (int or tuple, optional): the stride of the sliding blocks in the input
spatial dimensions. Default: 1
padding (int or tuple, optional): implicit zero padding to be added on
both sides of input. Default: 0
dilation (int or tuple, optional): a parameter that controls the
stride of elements within the
neighborhood. Default: 1
* If :attr:`kernel_size`, :attr:`dilation`, :attr:`padding` or
:attr:`stride` is an int or a tuple of length 1, their values will be
replicated across all spatial dimensions.
* For the case of two input spatial dimensions this operation is sometimes
called ``im2col``.
.. note::
:class:`~torch.nn.Fold` calculates each combined value in the resulting
large tensor by summing all values from all containing blocks.
:class:`~torch.nn.Unfold` extracts the values in the local blocks by
copying from the large tensor. So, if the blocks overlap, they are not
inverses of each other.
In general, folding and unfolding operations are related as
follows. Consider :class:`~torch.nn.Fold` and
:class:`~torch.nn.Unfold` instances created with the same
parameters:
>>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
>>> fold = nn.Fold(output_size=..., **fold_params)
>>> unfold = nn.Unfold(**fold_params)
Then for any (supported) ``input`` tensor the following
equality holds:
::
fold(unfold(input)) == divisor * input
where ``divisor`` is a tensor that depends only on the shape
and dtype of the ``input``:
>>> # xdoctest: +SKIP
>>> input_ones = torch.ones(input.shape, dtype=input.dtype)
>>> divisor = fold(unfold(input_ones))
When the ``divisor`` tensor contains no zero elements, then
``fold`` and ``unfold`` operations are inverses of each
other (up to constant divisor).
.. warning::
Currently, only 4-D input tensors (batched image-like tensors) are
supported.
Shape:
- Input: :math:`(N, C, *)`
- Output: :math:`(N, C \times \prod(\text{kernel\_size}), L)` as described above
Examples::
>>> unfold = nn.Unfold(kernel_size=(2, 3))
>>> input = torch.randn(2, 5, 3, 4)
>>> output = unfold(input)
>>> # each patch contains 30 values (2x3=6 vectors, each of 5 channels)
>>> # 4 blocks (2x3 kernels) in total in the 3x4 input
>>> output.size()
torch.Size([2, 30, 4])
>>> # xdoctest: +IGNORE_WANT
>>> # Convolution is equivalent with Unfold + Matrix Multiplication + Fold (or view to output shape)
>>> inp = torch.randn(1, 3, 10, 12)
>>> w = torch.randn(2, 3, 4, 5)
>>> inp_unf = torch.nn.functional.unfold(inp, (4, 5))
>>> out_unf = inp_unf.transpose(1, 2).matmul(w.view(w.size(0), -1).t()).transpose(1, 2)
>>> out = torch.nn.functional.fold(out_unf, (7, 8), (1, 1))
>>> # or equivalently (and avoiding a copy),
>>> # out = out_unf.view(1, 2, 7, 8)
>>> (torch.nn.functional.conv2d(inp, w) - out).abs().max()
tensor(1.9073e-06)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
__constants__ = ['kernel_size', 'dilation', 'padding', 'stride']
kernel_size: _size_any_t
dilation: _size_any_t
padding: _size_any_t
stride: _size_any_t
def __init__(
self,
kernel_size: _size_any_t,
dilation: _size_any_t = 1,
padding: _size_any_t = 0,
stride: _size_any_t = 1
) -> None:
super(Unfold, self).__init__()
self.kernel_size = kernel_size
self.dilation = dilation
self.padding = padding
self.stride = stride
def forward(self, input: Tensor) -> Tensor:
return F.unfold(input, self.kernel_size, self.dilation,
self.padding, self.stride)
def extra_repr(self) -> str:
return 'kernel_size={kernel_size}, dilation={dilation}, padding={padding},' \
' stride={stride}'.format(**self.__dict__)
| pytorch-master | torch/nn/modules/fold.py |
"""Utilities for converting and operating on ONNX, JIT and torch types."""
from __future__ import annotations
import enum
from typing import Dict, Optional, Union
from typing_extensions import Literal
import torch
from torch._C import _onnx as _C_onnx
ScalarName = Literal[
"Byte",
"Char",
"Double",
"Float",
"Half",
"Int",
"Long",
"Short",
"Bool",
"ComplexHalf",
"ComplexFloat",
"ComplexDouble",
"QInt8",
"QUInt8",
"QInt32",
"BFloat16",
"Undefined",
]
TorchName = Literal[
"bool",
"uint8_t",
"int8_t",
"double",
"float",
"half",
"int",
"int64_t",
"int16_t",
"complex32",
"complex64",
"complex128",
"qint8",
"quint8",
"qint32",
"bfloat16",
]
class JitScalarType(enum.IntEnum):
"""Scalar types defined in torch.
Use ``JitScalarType`` to convert from torch and JIT scalar types to ONNX scalar types.
Examples::
>>> JitScalarType.from_name("Float").onnx_type()
TensorProtoDataType.FLOAT
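        >>> # illustrative round trip through a torch dtype (follows the tables below)
        >>> JitScalarType.from_dtype(torch.float16).scalar_name()
        'Half'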
"""
# Order defined in https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h
UINT8 = 0
INT8 = enum.auto() # 1
INT16 = enum.auto() # 2
INT = enum.auto() # 3
INT64 = enum.auto() # 4
HALF = enum.auto() # 5
FLOAT = enum.auto() # 6
DOUBLE = enum.auto() # 7
COMPLEX32 = enum.auto() # 8
COMPLEX64 = enum.auto() # 9
COMPLEX128 = enum.auto() # 10
BOOL = enum.auto() # 11
QINT8 = enum.auto() # 12
QUINT8 = enum.auto() # 13
QINT32 = enum.auto() # 14
BFLOAT16 = enum.auto() # 15
UNDEFINED = enum.auto() # 16
@classmethod
def from_name(
cls, name: Union[ScalarName, TorchName, Optional[str]]
) -> JitScalarType:
"""Convert a JIT scalar type or torch type name to ScalarType.
Args:
name: JIT scalar type name (Byte) or torch type name (uint8_t).
Returns:
ScalarType.
Raises:
ValueError: if name is not a valid scalar type name or if it is None.
"""
if name is None:
raise ValueError("Scalar type name cannot be None")
if valid_scalar_name(name):
return _SCALAR_NAME_TO_TYPE[name] # type: ignore[index]
if valid_torch_name(name):
return _TORCH_NAME_TO_SCALAR_TYPE[name] # type: ignore[index]
raise ValueError(f"Unknown torch or scalar type: '{name}'")
@classmethod
def from_dtype(cls, dtype: torch.dtype) -> JitScalarType:
"""Convert a torch dtype to ScalarType."""
if dtype not in _DTYPE_TO_SCALAR_TYPE:
raise ValueError(f"Unknown dtype: {dtype}")
return _DTYPE_TO_SCALAR_TYPE[dtype]
def scalar_name(self) -> ScalarName:
"""Convert a ScalarType to a JIT scalar type name."""
return _SCALAR_TYPE_TO_NAME[self]
def torch_name(self) -> TorchName:
"""Convert a ScalarType to a torch type name."""
return _SCALAR_TYPE_TO_TORCH_NAME[self]
def dtype(self) -> torch.dtype:
"""Convert a ScalarType to a torch dtype."""
return _SCALAR_TYPE_TO_DTYPE[self]
def onnx_type(self) -> _C_onnx.TensorProtoDataType:
"""Convert a ScalarType to an ONNX data type."""
if self not in _SCALAR_TYPE_TO_ONNX:
raise ValueError(f"Scalar type {self} cannot be converted to ONNX")
return _SCALAR_TYPE_TO_ONNX[self]
def onnx_compatible(self) -> bool:
"""Return whether this ScalarType is compatible with ONNX."""
return (
self in _SCALAR_TYPE_TO_ONNX
and self != JitScalarType.UNDEFINED
and self != JitScalarType.COMPLEX32
)
def valid_scalar_name(scalar_name: Union[ScalarName, str]) -> bool:
"""Return whether the given scalar name is a valid JIT scalar type name."""
return scalar_name in _SCALAR_NAME_TO_TYPE
def valid_torch_name(torch_name: Union[TorchName, str]) -> bool:
"""Return whether the given torch name is a valid torch type name."""
return torch_name in _TORCH_NAME_TO_SCALAR_TYPE
# https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h
_SCALAR_TYPE_TO_NAME: Dict[JitScalarType, ScalarName] = {
JitScalarType.BOOL: "Bool",
JitScalarType.UINT8: "Byte",
JitScalarType.INT8: "Char",
JitScalarType.INT16: "Short",
JitScalarType.INT: "Int",
JitScalarType.INT64: "Long",
JitScalarType.HALF: "Half",
JitScalarType.FLOAT: "Float",
JitScalarType.DOUBLE: "Double",
JitScalarType.COMPLEX32: "ComplexHalf",
JitScalarType.COMPLEX64: "ComplexFloat",
JitScalarType.COMPLEX128: "ComplexDouble",
JitScalarType.QINT8: "QInt8",
JitScalarType.QUINT8: "QUInt8",
JitScalarType.QINT32: "QInt32",
JitScalarType.BFLOAT16: "BFloat16",
JitScalarType.UNDEFINED: "Undefined",
}
_SCALAR_NAME_TO_TYPE: Dict[ScalarName, JitScalarType] = {
v: k for k, v in _SCALAR_TYPE_TO_NAME.items()
}
_SCALAR_TYPE_TO_TORCH_NAME: Dict[JitScalarType, TorchName] = {
JitScalarType.BOOL: "bool",
JitScalarType.UINT8: "uint8_t",
JitScalarType.INT8: "int8_t",
JitScalarType.INT16: "int16_t",
JitScalarType.INT: "int",
JitScalarType.INT64: "int64_t",
JitScalarType.HALF: "half",
JitScalarType.FLOAT: "float",
JitScalarType.DOUBLE: "double",
JitScalarType.COMPLEX32: "complex32",
JitScalarType.COMPLEX64: "complex64",
JitScalarType.COMPLEX128: "complex128",
JitScalarType.QINT8: "qint8",
JitScalarType.QUINT8: "quint8",
JitScalarType.QINT32: "qint32",
JitScalarType.BFLOAT16: "bfloat16",
}
_TORCH_NAME_TO_SCALAR_TYPE: Dict[TorchName, JitScalarType] = {
v: k for k, v in _SCALAR_TYPE_TO_TORCH_NAME.items()
}
_SCALAR_TYPE_TO_ONNX = {
JitScalarType.BOOL: _C_onnx.TensorProtoDataType.BOOL,
JitScalarType.UINT8: _C_onnx.TensorProtoDataType.UINT8,
JitScalarType.INT8: _C_onnx.TensorProtoDataType.INT8,
JitScalarType.INT16: _C_onnx.TensorProtoDataType.INT16,
JitScalarType.INT: _C_onnx.TensorProtoDataType.INT32,
JitScalarType.INT64: _C_onnx.TensorProtoDataType.INT64,
JitScalarType.HALF: _C_onnx.TensorProtoDataType.FLOAT16,
JitScalarType.FLOAT: _C_onnx.TensorProtoDataType.FLOAT,
JitScalarType.DOUBLE: _C_onnx.TensorProtoDataType.DOUBLE,
JitScalarType.COMPLEX64: _C_onnx.TensorProtoDataType.COMPLEX64,
JitScalarType.COMPLEX128: _C_onnx.TensorProtoDataType.COMPLEX128,
JitScalarType.BFLOAT16: _C_onnx.TensorProtoDataType.BFLOAT16,
JitScalarType.UNDEFINED: _C_onnx.TensorProtoDataType.UNDEFINED,
JitScalarType.COMPLEX32: _C_onnx.TensorProtoDataType.UNDEFINED,
JitScalarType.QINT8: _C_onnx.TensorProtoDataType.INT8,
JitScalarType.QUINT8: _C_onnx.TensorProtoDataType.UINT8,
JitScalarType.QINT32: _C_onnx.TensorProtoDataType.INT32,
}
# source of truth is
# https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_dtypes.cpp
_SCALAR_TYPE_TO_DTYPE = {
JitScalarType.BOOL: torch.bool,
JitScalarType.UINT8: torch.uint8,
JitScalarType.INT8: torch.int8,
JitScalarType.INT16: torch.short,
JitScalarType.INT: torch.int,
JitScalarType.INT64: torch.int64,
JitScalarType.HALF: torch.half,
JitScalarType.FLOAT: torch.float,
JitScalarType.DOUBLE: torch.double,
JitScalarType.COMPLEX32: torch.complex32,
JitScalarType.COMPLEX64: torch.complex64,
JitScalarType.COMPLEX128: torch.complex128,
JitScalarType.QINT8: torch.qint8,
JitScalarType.QUINT8: torch.quint8,
JitScalarType.QINT32: torch.qint32,
JitScalarType.BFLOAT16: torch.bfloat16,
}
_DTYPE_TO_SCALAR_TYPE = {v: k for k, v in _SCALAR_TYPE_TO_DTYPE.items()}
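# Illustrative sketch (not part of the original module): a minimal example of how
# the conversions above are expected to round-trip between torch dtypes, JIT
# scalar names, torch C++ type names and ONNX tensor types.
def _example_scalar_type_round_trip() -> None:
    scalar_type = JitScalarType.from_dtype(torch.float16)
    assert scalar_type == JitScalarType.HALF
    assert scalar_type.scalar_name() == "Half"
    assert scalar_type.torch_name() == "half"
    assert scalar_type.dtype() == torch.half
    assert scalar_type.onnx_type() == _C_onnx.TensorProtoDataType.FLOAT16
    # Both the JIT name and the torch C++ name resolve to the same member.
    assert JitScalarType.from_name("Half") == JitScalarType.from_name("half")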
| pytorch-master | torch/onnx/_type_utils.py |
"""Constant values used in ONNX."""
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
onnx_default_opset = 13
onnx_main_opset = 16
onnx_stable_opsets = tuple(range(7, onnx_main_opset))
onnx_constant_folding_opsets = tuple(range(9, onnx_main_opset + 1))
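# Illustrative note (not part of the original module): with onnx_main_opset == 16,
# the two ranges above evaluate to
#   onnx_stable_opsets           == (7, 8, 9, 10, 11, 12, 13, 14, 15)
#   onnx_constant_folding_opsets == (9, 10, 11, 12, 13, 14, 15, 16)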
PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues"
| pytorch-master | torch/onnx/_constants.py |
"""This file exports ONNX ops for opset 15.
Note [ONNX operators that are added/updated in opset 15]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
https://github.com/onnx/onnx/blob/master/docs/Changelog.md#version-15-of-the-default-onnx-operator-set
New operators:
Bernoulli
CastLike
Optional
OptionalGetElement
OptionalHasElement
Updated operators:
BatchNormalization https://github.com/onnx/onnx/pull/3545
Backwards compatible
TODO: test coverage for mixed types inputs.
Pow https://github.com/onnx/onnx/pull/3412
Backwards compatible
TODO: bfloat16 support.
Shape https://github.com/onnx/onnx/pull/3580
Backwards compatible
TODO: optional start/end attribute.
"""
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
import torch
from torch import _C
from torch.onnx import symbolic_helper, symbolic_opset9 as opset9
def __is_(g, self, other):
if symbolic_helper._is_none(other):
if isinstance(self.type(), _C.OptionalType):
none = g.op("OptionalHasElement", self)
return g.op("Not", none)
else:
return g.op("Constant", value_t=torch.BoolTensor([0]))
return opset9.eq(g, self, other)
@opset9.wrap_logical_op_with_negation
def __isnot_(g, self, other):
return __is_(g, self, other)
class Prim:
domain = "prim"
@staticmethod
def unchecked_cast(g, self):
# exists to refine the type of the Value
# if x is Optional[Tensor], unchecked_cast will cast
# x to Tensor, so the rest of the graph knows that x is a Tensor.
if isinstance(self.type(), _C.OptionalType):
return g.op("OptionalGetElement", self)
return self
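# Illustrative sketch (not part of the original module): for an input `x` typed
# Optional[Tensor], the symbolics above are expected to lower
#   `x is None`              -> Not(OptionalHasElement(x))
#   `x is not None`          -> the negation-wrapped form of the above
#   prim::unchecked_cast(x)  -> OptionalGetElement(x)
# so that downstream ops see a plain Tensor instead of an Optional value.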
| pytorch-master | torch/onnx/symbolic_opset15.py |
import inspect
from typing import Dict, List, Union
from torch import _C
from torch.onnx import _constants, symbolic_registry
for v in _constants.onnx_stable_opsets:
symbolic_registry.register_version("", v)
symbolic_registry.register_version("", _constants.onnx_main_opset)
class _TorchSchema:
def __init__(self, schema: Union[_C.FunctionSchema, str]) -> None:
if isinstance(schema, _C.FunctionSchema):
self.name: str = schema.name
self.overload_name: str = schema.overload_name
self.arguments: List[str] = [arg.name for arg in schema.arguments]
self.optional_arguments: List[str] = []
self.returns: List[str] = [ret.name for ret in schema.returns]
self.opsets: List[int] = []
else:
self.name = schema
self.overload_name = ""
self.arguments = []
self.optional_arguments = []
self.returns = []
self.opsets = []
def __str__(self) -> str:
s = f"{self.name}.{self.overload_name}("
s += ", ".join(self.arguments)
s += ") -> ("
s += ", ".join(self.returns)
s += ")"
s += " in opsets "
s += ", ".join(str(opset) for opset in self.opsets)
return s
def __hash__(self):
# TODO(thiagocrepaldi): handle overload_name?
return hash(self.name)
def __eq__(self, other) -> bool:
if not isinstance(other, _TorchSchema):
return False
# TODO(thiagocrepaldi): handle overload_name?
return self.name == other.name
def is_aten(self) -> bool:
return self.name.startswith("aten::")
def is_backward(self) -> bool:
return "backward" in self.name
def _all_aten_forward_schemas():
"""Creates a list of _TorchSchema for all aten schemas."""
torch_schemas = [_TorchSchema(s) for s in _C._jit_get_all_schemas()]
torch_schemas = sorted(torch_schemas, key=lambda x: x.name)
aten_schemas = [s for s in torch_schemas if s.is_aten() and not s.is_backward()]
return aten_schemas
def _symbolic_argument_count(func):
params = []
signature = inspect.signature(func)
optional_params = []
for name, parameter in signature.parameters.items():
if name in {"_outputs", "g"}:
continue
if parameter.default is parameter.empty:
optional_params.append(parameter)
else:
params.append(str(parameter))
return params
def _all_symbolics_schemas():
symbolics_schemas: Dict[str, _TorchSchema] = dict()
for domain, version in symbolic_registry._registry:
for opname, sym_func in symbolic_registry._registry[(domain, version)].items():
symbolics_schema = _TorchSchema("aten::" + opname)
symbolics_schema.arguments = _symbolic_argument_count(sym_func)
if opname in symbolics_schemas:
symbolics_schemas[opname].opsets.append(version)
else:
symbolics_schema.opsets = [version]
symbolics_schemas[opname] = symbolics_schema
return symbolics_schemas
def onnx_supported_ops():
aten_schemas = _all_aten_forward_schemas()
symbolic_schemas = _all_symbolics_schemas()
torch_schemas = set(symbolic_schemas.values())
supported_ops = []
onnx_supported = []
for schema in aten_schemas:
if schema in torch_schemas:
opname = schema.name[6:] # without "aten::" prefix
opsets = symbolic_schemas[opname].opsets
if schema not in supported_ops:
supported_ops.append(symbolic_schemas[opname])
onnx_supported.append((opname, " ".join(str(o) for o in opsets)))
return sorted(onnx_supported, key=lambda x: x[0])
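# Illustrative usage sketch (not part of the original module): each returned entry
# pairs an aten op name with a space-separated string of the opset versions that
# register a symbolic for it.
def _example_print_supported_ops(limit: int = 5) -> None:
    for opname, opsets in onnx_supported_ops()[:limit]:
        print(f"{opname}: opsets {opsets}")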
| pytorch-master | torch/onnx/_onnx_supported_ops.py |
"""
Note [ONNX operators that are added/updated from opset 7 to opset 8]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
New operators:
Expand
Updated operators:
Min, Max, Sum, Mean: supports multidirectional broadcasting.
MaxPool: added optional indices output.
Scan
"""
import warnings
from torch.onnx import symbolic_helper, symbolic_opset9 as opset9
block_listed_operators = [
"scan",
"expand",
"expand_as",
"meshgrid",
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"max_pool1d_with_indices",
"max_pool2d_with_indices",
"max_pool3d_with_indices",
]
# NOTE: max, min, sum, mean: broadcasting is not supported in opset 7.
# torch.max (same for torch.min) actually has two interfaces smashed together:
# torch.max(x, dim, keepdim) and torch.max(x, y)
def max(g, self, dim_or_y=None, keepdim=None):
# torch.max(input, other)
if keepdim is None and dim_or_y is not None:
warnings.warn(
"Multidirectional broadcasting is not supported in opset 7. "
"This might cause the onnx model to be incorrect, if inputs to max operators "
"have different shapes"
)
return opset9.max(g, self, dim_or_y, keepdim)
def min(g, self, dim_or_y=None, keepdim=None):
# torch.min(input, other)
if keepdim is None and dim_or_y is not None:
warnings.warn(
"Multidirectional broadcasting is not supported in opset 7. "
"This might cause the onnx model to be incorrect, if inputs to min operators "
"have different shapes"
)
return opset9.min(g, self, dim_or_y, keepdim)
for block_listed_op in block_listed_operators:
vars()[block_listed_op] = symbolic_helper._block_list_in_opset(block_listed_op)
vars()[block_listed_op].__module__ = "torch.onnx.symbolic_opset7"
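# Illustrative note (not part of the original module): the loop above installs a
# module-level symbolic for each block-listed op; judging by the helper's name,
# calling such a symbolic (e.g. `expand(g, input, size)`) is expected to raise
# rather than emit an ONNX node, since these ops cannot be expressed in opset 7.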
| pytorch-master | torch/onnx/symbolic_opset7.py |
import importlib
import inspect
from torch.onnx import symbolic_helper, symbolic_opset9 as opset9, symbolic_registry
def register_quantized_ops(domain: str, version: int):
# Register all the non-quantized ops
symbolic_registry.register_version("", version)
# Register all quantized ops
module = importlib.import_module("torch.onnx.symbolic_caffe2")
symbolic_registry._symbolic_versions["caffe2"] = module
quant_version_ops = inspect.getmembers(
symbolic_registry._symbolic_versions["caffe2"]
)
for op in quant_version_ops:
if inspect.isfunction(op[1]) and not symbolic_registry.is_registered_op(
op[0], domain, version
):
aten_q_ops = [
"relu",
"_empty_affine_quantized",
"dequantize",
"quantize_per_tensor",
"upsample_nearest2d",
"avg_pool2d",
"reshape",
"slice",
"cat",
"max_pool2d",
"sigmoid",
]
if op[0] in aten_q_ops:
symbolic_registry.register_op(op[0], op[1], "", version)
symbolic_registry.register_op(op[0], op[1], domain, version)
def _permute_helper(g, input, axes):
quant_args = {
"axes_i": axes,
"Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
"Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
}
output = g.op("_caffe2::Int8Transpose", input, **quant_args)
symbolic_helper._quantized_ops.add(output)
return output
def nchw2nhwc(g, input):
axes = [0, 2, 3, 1]
return _permute_helper(g, input, axes)
def nhwc2nchw(g, input):
axes = [0, 3, 1, 2]
return _permute_helper(g, input, axes)
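# Illustrative sketch (not part of the original module): the caffe2 Int8 kernels
# targeted below operate on NHWC data, so the spatial symbolics in this file wrap
# the quantized op between the two permutes above, roughly:
#
#     input = nchw2nhwc(g, input)
#     output = g.op("_caffe2::Int8MaxPool", input, **kwargs)
#     output = nhwc2nchw(g, output)
#
# (see max_pool2d, avg_pool2d and upsample_nearest2d further down).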
def linear_prepack(g, weight, bias):
# Mapping to a dummy caffe2 prepack node.
# During the onnx -> c2 conversion we can look up original weight and bias
# from this node
output = g.op("_caffe2::WeightPrepack", weight, bias)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v", "v", "v", "f", "i")
def linear(g, input, weight, bias, scale, zero_point):
kwargs = {
"Y_scale_f": scale,
"Y_zero_point_i": zero_point,
}
output = g.op("_caffe2::Int8FC", input, weight, bias, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
def conv_prepack(g, input, weight, bias, stride, padding, dilation, groups):
# Mapping to a dummy caffe2 prepack node.
# During the onnx -> c2 conversion we can look up original weight and bias
# from this node
output = g.op("_caffe2::WeightPrepack", input, weight, bias)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "f", "i")
def conv2d(
g, input, weight, bias, stride, padding, dilation, groups, scale, zero_point
):
kernel_size = weight.node()["shape"][1:3]
kwargs = {
"strides_i": stride,
"pads_i": padding + padding,
"dilations_i": dilation,
"group_i": groups,
"kernels_i": kernel_size,
"order_s": "NHWC",
"Y_scale_f": scale,
"Y_zero_point_i": zero_point,
}
output = g.op("_caffe2::Int8Conv", input, weight, bias, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "f", "i")
def conv2d_relu(
g, input, weight, bias, stride, padding, dilation, groups, scale, zero_point
):
kernel_size = weight.node()["shape"][1:3]
kwargs = {
"strides_i": stride,
"pads_i": padding + padding,
"dilations_i": dilation,
"group_i": groups,
"kernels_i": kernel_size,
"order_s": "NHWC",
"Y_scale_f": scale,
"Y_zero_point_i": zero_point,
}
output = g.op("_caffe2::Int8ConvRelu", input, weight, bias, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v", "v", "f", "i")
def add(g, input_a, input_b, scale, zero_point):
kwargs = {
"Y_scale_f": scale,
"Y_zero_point_i": zero_point,
}
output = g.op("_caffe2::Int8Add", input_a, input_b, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v")
def relu(g, input):
if input not in symbolic_helper._quantized_ops:
return opset9.relu(g, input)
kwargs = {
"Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
"Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
}
output = g.op("_caffe2::Int8Relu", input, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v", "f", "i", "t")
def quantize_per_tensor(g, input, scale, zero_point, dtype):
kwargs = {
"Y_scale_f": scale,
"Y_zero_point_i": zero_point,
}
output = g.op("_caffe2::Int8Quantize", input, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v")
def dequantize(g, input):
return g.op("_caffe2::Int8Dequantize", input)
@symbolic_helper.parse_args("v", "t", "t", "t", "t", "t", "t", "t")
def _empty_affine_quantized(
g, input, shape, scale, zero_point, dtype, pin_memory, memory_format, layout
):
return input
def upsample_nearest2d(
g, input, output_size, align_corners=None, scales_h=None, scales_w=None
):
if input not in symbolic_helper._quantized_ops:
return opset9.upsample_nearest2d(g, input, output_size, align_corners)
output_size = symbolic_helper._parse_arg(output_size, "is")
kwargs = {
"output_size_i": output_size,
"Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
"Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
}
input = nchw2nhwc(g, input)
output = g.op("_caffe2::Int8ResizeNearest", input, **kwargs)
output = nhwc2nchw(g, output)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v", "is", "is", "is", "is", "i")
def max_pool2d(g, input, kernel_size, stride, padding, dilation, ceil_mode):
if input not in symbolic_helper._quantized_ops:
return opset9.max_pool2d(
g, input, kernel_size, stride, padding, dilation, ceil_mode
)
kwargs = {
"strides_i": stride,
"pads_i": padding + padding,
"kernel_i": kernel_size[0],
"order_s": "NHWC",
"Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
"Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
}
input = nchw2nhwc(g, input)
output = g.op("_caffe2::Int8MaxPool", input, **kwargs)
output = nhwc2nchw(g, output)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none")
def avg_pool2d(
g,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override=None,
):
if input not in symbolic_helper._quantized_ops:
return opset9.avg_pool2d(
g,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
)
kwargs = {
"strides_i": stride,
"pads_i": padding + padding,
"kernel_i": kernel_size[0],
"order_s": "NHWC",
"Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
"Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
}
input = nchw2nhwc(g, input)
output = g.op("_caffe2::Int8AveragePool", input, **kwargs)
output = nhwc2nchw(g, output)
symbolic_helper._quantized_ops.add(output)
return output
def reshape(g, input, shape):
if input not in symbolic_helper._quantized_ops:
return opset9.reshape(g, input, shape)
kwargs = {
"Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
"Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
}
output = g.op("_caffe2::Int8Reshape", input, shape, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v", "v", "v", "v", "i")
def slice(g, input, dim, start, end, step):
if input not in symbolic_helper._quantized_ops:
return opset9.slice(g, input, dim, start, end, step)
if step != 1:
raise RuntimeError("ONNX quantized slice export only works for step 1.")
start = symbolic_helper._parse_arg(start, "i")
end = symbolic_helper._parse_arg(end, "i")
dim = symbolic_helper._parse_arg(dim, "i")
kwargs = {
"start_idx_i": start,
"end_idx_i": end,
"dim_i": dim,
"Y_scale_f": symbolic_helper._node_get(input.node(), "Y_scale"),
"Y_zero_point_i": symbolic_helper._node_get(input.node(), "Y_zero_point"),
}
output = g.op("_caffe2::Int8Slice", input, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
def cat(g, tensor_list, dim, scale=None, zero_point=None):
tensors = symbolic_helper._unpack_list(tensor_list)
input = tensors[0]
if input not in symbolic_helper._quantized_ops:
return opset9.cat(g, tensor_list, dim)
dim = symbolic_helper._parse_arg(dim, "i")
kwargs = {
"Y_scale_f": tensors[0].node()["Y_scale"],
"Y_zero_point_i": tensors[0].node()["Y_zero_point"],
}
output = g.op("_caffe2::Int8Concat", *tensors, axis_i=dim, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
@symbolic_helper.parse_args("v")
def sigmoid(g, input):
if input not in symbolic_helper._quantized_ops:
return opset9.sigmoid(g, input)
# Caffe2 expects the output scale to be 1/2^8
# and output zero_point to be 0 (quint8 type)
out_scale = 1.0 / 256
zero_point = 0
kwargs = {
"Y_scale_f": out_scale,
"Y_zero_point_i": zero_point,
}
output = g.op("_caffe2::Int8Sigmoid", input, **kwargs)
symbolic_helper._quantized_ops.add(output)
return output
| pytorch-master | torch/onnx/symbolic_caffe2.py |
"""This file exports ONNX ops for opset 11."""
import sys
import warnings
from typing import Tuple, Union
import torch
from torch import _C
from torch._C import _onnx as _C_onnx
from torch.onnx import (
_type_utils,
symbolic_helper,
symbolic_opset10 as opset10,
symbolic_opset9 as opset9,
utils,
)
from torch.onnx._globals import GLOBALS
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
__all__ = [
"add",
"append",
"arange",
"argsort",
"avg_pool1d",
"avg_pool2d",
"avg_pool3d",
"cat",
"chunk",
"clamp_max",
"clamp_min",
"clamp",
"constant_pad_nd",
"cumsum",
"Delete",
"embedding_bag",
"embedding_renorm",
"flatten",
"gather",
"hardtanh",
"im2col",
"index_fill",
"index",
"index_copy",
"index_put",
"insert",
"linalg_det",
"linalg_vector_norm",
"logdet",
"masked_scatter",
"masked_select",
"mm",
"narrow",
"normal",
"pad",
"pixel_shuffle",
"pop",
"Prim",
"reflection_pad",
"reflection_pad1d",
"reflection_pad2d",
"reflection_pad3d",
"relu6",
"remainder",
"replication_pad",
"replication_pad1d",
"replication_pad2d",
"replication_pad3d",
"round",
"scatter",
"select",
"size",
"sort",
"split_with_sizes",
"split",
"squeeze",
"stack",
"topk",
"unbind",
"unique_dim",
"unsqueeze",
"upsample_bicubic2d",
"upsample_bilinear2d",
"upsample_linear1d",
"upsample_nearest1d",
"upsample_nearest2d",
"upsample_nearest3d",
"upsample_trilinear3d",
]
@symbolic_helper.parse_args("v", "f", "f")
def hardtanh(g, self, min_val, max_val):
dtype = self.type().scalarType()
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType.from_name(dtype)
min_val = g.op(
"Constant",
value_t=torch.tensor(min_val, dtype=scalar_type.dtype()),
)
max_val = g.op(
"Constant",
value_t=torch.tensor(max_val, dtype=scalar_type.dtype()),
)
return opset9.op_with_optional_float_cast(
g, "Clip", self, min_val, max_val, opset_before=12
)
def clamp(g, self, min, max):
dtype = self.type().scalarType()
def _cast_if_not_none(tensor, dtype):
if tensor is not None and not symbolic_helper._is_none(tensor):
return g.op(
"Cast",
tensor,
to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type(),
)
else:
return tensor
if dtype is not None:
min = _cast_if_not_none(min, dtype)
max = _cast_if_not_none(max, dtype)
if symbolic_helper._is_none(min):
return clamp_max(g, self, max)
elif symbolic_helper._is_none(max):
return clamp_min(g, self, min)
else:
if (
symbolic_helper._get_tensor_rank(min) == 0
and symbolic_helper._get_tensor_rank(max) == 0
):
return opset9.op_with_optional_float_cast(
g, "Clip", self, min, max, opset_before=12
)
else:
return clamp_max(g, clamp_min(g, self, min), max)
@symbolic_helper.parse_args("v", "v")
def clamp_min(g, self, min):
dtype = self.type().scalarType()
min = g.op("Cast", min, to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type())
if symbolic_helper._get_tensor_rank(min) == 0:
max = opset9.unused(g)
return opset9.op_with_optional_float_cast(
g, "Clip", self, min, max, opset_before=12
)
else:
return opset9.op_with_optional_float_cast(g, "Max", self, min, opset_before=12)
@symbolic_helper.parse_args("v", "v")
def clamp_max(g, self, max):
dtype = self.type().scalarType()
max = g.op("Cast", max, to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type())
if symbolic_helper._get_tensor_rank(max) == 0:
min = opset9.unused(g)
return opset9.op_with_optional_float_cast(
g, "Clip", self, min, max, opset_before=12
)
else:
return opset9.op_with_optional_float_cast(g, "Min", self, max, opset_before=12)
def relu6(g, input):
relu_ = opset9.op_with_optional_float_cast(g, "Relu", input, opset_before=14)
dtype = input.type().scalarType()
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType.from_name(dtype)
min_val = g.op(
"Constant",
value_t=torch.tensor(0, dtype=scalar_type.dtype()),
)
max_val = g.op(
"Constant",
value_t=torch.tensor(6, dtype=scalar_type.dtype()),
)
return clamp(g, relu_, min_val, max_val)
# Opset 11 gather accepts negative indices
@symbolic_helper.parse_args("v", "i", "v")
def select(g, self, dim, index):
return g.op("Gather", self, index, axis_i=dim)
def index_put(g, self, indices_list_value, values, accumulate=False):
if symbolic_helper._is_packed_list(indices_list_value):
indices_list = symbolic_helper._unpack_list(indices_list_value)
else:
indices_list = [indices_list_value]
if symbolic_helper.is_caffe2_aten_fallback():
args = [self] + indices_list + [values, accumulate]
return g.at("index_put", *args)
accumulate = symbolic_helper._parse_arg(accumulate, "b")
if len(indices_list) == 0:
return values
if len(indices_list) > 1:
for idx_ in range(len(indices_list)):
if indices_list[idx_].type().scalarType() == "Bool": # type: ignore[attr-defined]
# TODO(justinchuby): Remove type ignore after #81112 is checked in.
indices_list[idx_] = g.op("NonZero", indices_list[idx_])
index = indices_list[0]
for ind in indices_list[1:]:
index = opset9.add(g, index, ind)
broadcast_index_shape = g.op("Shape", index)
indices_list = [
symbolic_helper._unsqueeze_helper(
g, opset9.expand(g, ind, broadcast_index_shape, None), [-1]
)
for ind in indices_list
]
index = g.op("Concat", *indices_list, axis_i=-1)
else:
# Replace index_put node with masked_scatter or masked_fill
# when inputs to the index_put node contains a single boolean input.
#
# index_put -> masked_fill
# * input index contains single tensor of Bool type (e.g.: %24 <- %23).
# * input value contains single element (e.g.: %18).
#
# Torch IR
# %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)
# %16 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =
# aten::to(%8, %26, %27, %11, %12, %28, %29, %15)
# %18 : Float(requires_grad=0, device=cpu) = prim::Constant[value={1}]()
# %23 : Bool(8, strides=[1], device=cpu) = aten::view(%16, %22)
# %24 : Tensor?[] = prim::ListConstruct(%23)
# %25 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =
# aten::index_put(%mask, %24, %18, %30)
# return (%25)
#
#
# index_put -> masked_scatter
# * input index contains single tensor of Bool type (e.g.: %32 <- %31).
# * input value contains multiple elements (e.g.: %28).
#
# Torch IR
# %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)
# %28 : Float(8, strides=[1], requires_grad=0, device=cpu)
# = prim::Constant[value= 1 1 1 1 1 1 1 1 [ CPUFloatType{8} ]]()
# %15 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
# = aten::ne(%mask, %some_const)
# %23 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
# = aten::to(%15, %34, %35, %18, %19, %36, %37, %22)
# %38 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
# %30 : int[] = prim::Constant[value=[-1]]()
# %31 : Bool(8, strides=[1], device=cpu) = aten::view(%23, %30)
# %32 : Tensor?[] = prim::ListConstruct(%31)
# %33 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
# = aten::index_put(%mask, %32, %28, %38)
# return (%33)
index = indices_list[0]
bool_inp = index
if bool_inp.type() is not None and bool_inp.type().scalarType() == "Bool": # type: ignore[attr-defined]
# TODO(justinchuby): Remove type ignore after #81112 is checked in.
rank = symbolic_helper._get_tensor_rank(values)
if rank is not None and rank == 0:
return opset9.masked_fill(g, self, bool_inp, values)
return masked_scatter(g, self, bool_inp, values)
broadcast_index_shape = g.op("Shape", index)
index = symbolic_helper._unsqueeze_helper(g, index, [-1])
sub_data_shape = symbolic_helper._slice_helper(
g, g.op("Shape", self), axes=[0], starts=[len(indices_list)], ends=[sys.maxsize]
)
values_shape = g.op("Concat", broadcast_index_shape, sub_data_shape, axis_i=0)
    # Check if values is a single scalar (rank-0) value and expand accordingly
rank = symbolic_helper._get_tensor_rank(values)
if rank is not None and rank == 0:
values = opset9.expand(g, values, values_shape, None)
values = symbolic_helper._reshape_helper(g, values, values_shape)
dtype = self.type().scalarType()
if dtype is not None and dtype != values.type().scalarType():
values = g.op(
"Cast", values, to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type()
)
scalar_type = _type_utils.JitScalarType.from_name(dtype)
if accumulate:
zeros = g.op(
"ConstantOfShape",
g.op("Shape", self),
value_t=torch.tensor([0], dtype=scalar_type.dtype()),
)
result = g.op("ScatterND", zeros, index, values)
result = add(g, self, result)
else:
result = g.op("ScatterND", self, index, values)
return result
@symbolic_helper.parse_args("v", "i")
def pixel_shuffle(g, self, upscale_factor):
rank = symbolic_helper._get_tensor_rank(self)
if rank is not None and rank != 4:
return symbolic_helper._unimplemented("pixel_shuffle", "only support 4d input")
return g.op("DepthToSpace", self, blocksize_i=upscale_factor, mode_s="CRD")
def _interpolate(name, dim, interpolate_mode):
return symbolic_helper._interpolate_helper(name, dim, interpolate_mode)
upsample_nearest1d = _interpolate("upsample_nearest1d", 3, "nearest")
upsample_nearest2d = _interpolate("upsample_nearest2d", 4, "nearest")
upsample_nearest3d = _interpolate("upsample_nearest3d", 5, "nearest")
upsample_linear1d = _interpolate("upsample_linear1d", 3, "linear")
upsample_bilinear2d = _interpolate("upsample_bilinear2d", 4, "linear")
upsample_trilinear3d = _interpolate("upsample_trilinear3d", 5, "linear")
upsample_bicubic2d = _interpolate("upsample_bicubic2d", 4, "cubic")
upsample_nearest1d.__module__ = "torch.onnx.symbolic_opset11"
upsample_nearest2d.__module__ = "torch.onnx.symbolic_opset11"
upsample_nearest3d.__module__ = "torch.onnx.symbolic_opset11"
upsample_linear1d.__module__ = "torch.onnx.symbolic_opset11"
upsample_bilinear2d.__module__ = "torch.onnx.symbolic_opset11"
upsample_trilinear3d.__module__ = "torch.onnx.symbolic_opset11"
upsample_bicubic2d.__module__ = "torch.onnx.symbolic_opset11"
@symbolic_helper.quantized_args(True, False, False, False, False, False, False)
def __interpolate(
g, input, size, scale_factor, mode, align_corners, recompute_scale_factor, antialias
):
return symbolic_helper.__interpolate_helper(
g, input, size, scale_factor, mode, align_corners, recompute_scale_factor
)
@symbolic_helper.parse_args("v", "i", "v", "v")
def gather(g, self, dim, index, sparse_grad=False):
if symbolic_helper._maybe_get_const(sparse_grad, "i"):
return symbolic_helper._unimplemented("gather", "sparse_grad == True")
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("gather", self, dim, index, sparse_grad)
return g.op("GatherElements", self, index, axis_i=dim)
@symbolic_helper.parse_args("v", "i", "v", "v")
def scatter(g, self, dim, index, src):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("scatter", self, dim, index, src, overload_name="src")
src_type = src.type().scalarType()
src = symbolic_helper._maybe_get_scalar(src)
if symbolic_helper._is_value(src):
return g.op("ScatterElements", self, index, src, axis_i=dim)
else:
# Check if scalar "src" has same type as self (PyTorch allows different
# type for scalar src (but not when src is tensor)). If not, insert Cast node.
if self.type().scalarType() != src_type:
src = g.op(
"Cast",
src,
to_i=_type_utils.JitScalarType.from_name(
self.type().scalarType()
).onnx_type(),
)
return g.op(
"ScatterElements", self, index, opset9.expand_as(g, src, index), axis_i=dim
)
@symbolic_helper.parse_args("v", "i", "none")
def cumsum(g, self, dim, dtype=None):
dim_tensor = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.int))
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype")
cast = g.op(
"Cast", self, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type()
)
else:
cast = self
csum = g.op("CumSum", cast, dim_tensor)
return csum
def masked_select(g, self, mask):
index = opset9.nonzero(g, opset9.expand_as(g, mask, self))
return g.op("GatherND", self, index)
def masked_scatter(g, self, mask, source):
index = opset9.nonzero(g, opset9.expand_as(g, mask, self))
# NOTE: source can have more elements than needed.
# It could also have arbitrary shape.
# This is not supported by ONNX::ScatterND, so we need to flatten and slice source tensor.
source = symbolic_helper._reshape_helper(g, source, torch.LongTensor([-1]))
source = symbolic_helper._slice_helper(
g,
source,
axes=torch.LongTensor([0]),
starts=torch.LongTensor([0]),
ends=opset9.size(g, index, torch.LongTensor([0])),
dynamic_slice=True,
)
return g.op("ScatterND", self, index, source)
def _len(g, self):
if (
symbolic_helper._is_tensor_list(self)
or self.node().kind() == "onnx::SplitToSequence"
):
return g.op("SequenceLength", self)
sz_0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0])))
return symbolic_helper._squeeze_helper(g, sz_0, [0])
def __getitem_(g, self, i):
if symbolic_helper._is_tensor_list(self):
# SequenceAt requires that the input be a List of Tensors
return g.op("SequenceAt", self, i)
else:
from torch.onnx.symbolic_opset9 import __getitem_ as getitem
return getitem(g, self, i)
def _set_item(g, tensor_list, i, v):
tensor_list = g.op("SequenceErase", tensor_list, i)
return g.op("SequenceInsert", tensor_list, v, i)
def append(g, self, tensor):
return g.op("SequenceInsert", self, tensor)
def add(g, self, other, alpha=None):
if symbolic_helper._is_value(self) and symbolic_helper._is_tensor_list(self):
tensor_list_node = other.node()
if tensor_list_node.kind() != "prim::ListConstruct":
return symbolic_helper._unimplemented(
"add", "does not support adding dynamic tensor list to another"
)
tensors = symbolic_helper._unpack_list(other)
l = self
for t in tensors:
l = g.op("SequenceInsert", l, t)
return l
return opset9.add(g, self, other, alpha)
def insert(g, self, pos, tensor):
return g.op("SequenceInsert", self, tensor, pos)
def pop(g, tensor_list, dim):
return g.op("SequenceErase", tensor_list, dim)
def Delete(g, tensor_list, dim):
return g.op("SequenceErase", tensor_list, dim)
def cat(g, tensor_list, dim):
if symbolic_helper._is_packed_list(tensor_list):
return opset9.cat(g, tensor_list, dim)
else:
dim = symbolic_helper._get_const(dim, "i", "dim")
return g.op("ConcatFromSequence", tensor_list, axis_i=dim)
def stack(g, tensor_list, dim):
if symbolic_helper._is_packed_list(tensor_list):
return opset9.stack(g, tensor_list, dim)
else:
dim = symbolic_helper._get_const(dim, "i", "dim")
return g.op("ConcatFromSequence", tensor_list, axis_i=dim, new_axis_i=1)
@symbolic_helper.parse_args("v", "i", "i", "i")
def _unique2(g, self, sorted, return_inverse, return_counts):
u, indices, inverse_indices, counts = g.op(
"Unique", self, sorted_i=sorted, outputs=4
)
return u, inverse_indices, counts
def _avg_pool(name, tuple_fn):
@symbolic_helper.quantized_args(True, False, False, False, False, False, False)
@symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none")
def symbolic_fn(
g,
input: _C.Value,
kernel_size: Tuple[int, ...],
stride: Tuple[int, ...],
padding: Union[int, Tuple[int, ...]],
ceil_mode: int,
count_include_pad: int,
divisor_override=None,
):
padding = symbolic_helper._avgpool_helper(
tuple_fn, padding, kernel_size, stride, divisor_override, name
)
if not stride:
stride = kernel_size
if count_include_pad:
input = g.op(
"Pad",
input,
g.op("Constant", value_t=torch.tensor(((0,) * 2 + padding) * 2)),
mode_s="constant",
)
padding = (0,) * len(padding)
output = g.op(
"AveragePool",
input,
kernel_shape_i=tuple_fn(kernel_size),
strides_i=tuple_fn(stride),
pads_i=padding * 2,
ceil_mode_i=ceil_mode,
)
return output
return symbolic_fn
avg_pool1d = _avg_pool("avg_pool1d", torch.nn.modules.utils._single)
avg_pool2d = _avg_pool("avg_pool2d", torch.nn.modules.utils._pair)
avg_pool3d = _avg_pool("avg_pool3d", torch.nn.modules.utils._triple)
@symbolic_helper.parse_args("v", "i", "i", "i", "i")
def unique_dim(g, self, dim, sorted, return_inverse, return_counts):
u, indices, inverse_indices, counts = g.op(
"Unique", self, axis_i=dim, sorted_i=sorted, outputs=4
)
return u, inverse_indices, counts
@symbolic_helper.parse_args("v", "v", "i", "i", "i", "none")
def topk(g, self, k, dim, largest, sorted, out=None):
return symbolic_helper._topk_helper(
g, self, k, dim, largest=largest, sorted=sorted, out=out
)
@symbolic_helper.parse_args("v", "i", "i", "none")
def sort(g, self, dim, decending, out=None):
return symbolic_helper._sort_helper(g, self, dim, decending=decending, out=out)
@symbolic_helper.parse_args("v", "i", "i", "none")
def argsort(g, self, dim, decending, out=None):
_, indices = symbolic_helper._sort_helper(
g, self, dim, decending=decending, out=out
)
return indices
def round(g, self):
return g.op("Round", self)
def remainder(g, input, other):
if symbolic_helper._is_fp(input) or symbolic_helper._is_fp(other):
return opset9.remainder(g, input, other)
return g.op("Mod", input, other, fmod_i=0)
@symbolic_helper.parse_args("v", "v", "i", "i")
def split(g, self, split_size_or_sizes, dim, _outputs=None):
if not symbolic_helper._is_split_static(split_size_or_sizes, _outputs):
split_out = g.op("SplitToSequence", self, split_size_or_sizes, axis_i=dim)
if _outputs is None:
return split_out
# Convert to multiple slice nodes iff number of splits and number of outputs are statically known.
if (
symbolic_helper._is_packed_list(split_size_or_sizes)
and len(symbolic_helper._unpack_list(split_size_or_sizes)) == _outputs
):
split_sizes = [
symbolic_helper._unsqueeze_helper(g, v, [0])
for v in symbolic_helper._unpack_list(split_size_or_sizes)
]
start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
res = []
for i in range(_outputs):
end = g.op(
"Add", start, split_sizes[i]
) # split_sizes is a list of same length as _outputs
res.append(g.op("Slice", self, start, end, axis))
start = end
return res
return [
g.op(
"SequenceAt",
split_out,
g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)),
)
for i in range(_outputs)
]
else:
return opset9.split(g, self, split_size_or_sizes, dim, _outputs)
@symbolic_helper.parse_args("v", "v", "i", "i")
def split_with_sizes(g, self, split_sizes, dim, _outputs=None):
return split(g, self, split_sizes, dim, _outputs)
@symbolic_helper.parse_args("v", "i", "i")
def unbind(g, self, dim=0, _outputs=None):
if _outputs is None:
return g.op(
"SplitToSequence",
self,
g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)),
axis_i=dim,
keepdims_i=0,
)
else:
return opset9.unbind(g, self, dim, _outputs)
# Generate paddings in ONNX order based on pad in pytorch.
# Args:
# input: the input tensor.
# pad: the paddings in pytorch.
# The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ..., dim_m_begin, dim_m_end,
# where m is in range [0, n].
def _prepare_onnx_paddings(g, input, pad):
if (
not symbolic_helper._is_packed_list(pad)
and symbolic_helper._is_list(pad)
and symbolic_helper._is_scalar_list(pad)
):
pad = g.op("ConcatFromSequence", pad, axis_i=0, new_axis_i=1)
# The desired order of paddings is
# dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.
# n is the dimension of input.
    # Dims at the front of the input that are not covered by `pad` are assumed to
    # have zero padding, so the `pad` sequence is extended with zeros below.
pad_len = opset9.size(g, pad, g.op("Constant", value_t=torch.tensor([0])))
# Set extension = [0] * (dim * 2 - len(pad))
rank = symbolic_helper._get_tensor_rank(input)
if rank is None:
rank = g.op("Size", g.op("Shape", input))
else:
rank = g.op("Constant", value_t=torch.tensor(rank, dtype=torch.int64))
extension = g.op(
"Sub",
g.op("Mul", rank, g.op("Constant", value_t=torch.tensor(2, dtype=torch.int64))),
pad_len,
)
# Concat pad with extension: paddings = [dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, 0, 0, ... ]
# Currently ONNX only supports int64 type for Pad
pad = g.op("Cast", pad, to_i=_C_onnx.TensorProtoDataType.INT64)
paddings = g.op(
"Concat",
pad,
g.op(
"ConstantOfShape", extension, value_t=torch.tensor([0], dtype=torch.int64)
),
axis_i=0,
)
# Reshape and reverse order and collate first beginnings and then ends
# paddings = [[..., 0, dim_n-1_begin, dim_n_begin],
# [..., 0, dim_n-1_end, dim_n_end]]
# Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin, ..., 0, dim_n - 1_end, dim_n_end]
paddings = symbolic_helper._reshape_helper(
g, paddings, g.op("Constant", value_t=torch.tensor([-1, 2]))
)
paddings = g.op("Transpose", opset10.flip(g, paddings, [0]), perm_i=[1, 0])
paddings = symbolic_helper._reshape_helper(
g, paddings, g.op("Constant", value_t=torch.tensor([-1]))
)
padding_c = g.op("Cast", paddings, to_i=_C_onnx.TensorProtoDataType.INT64)
return padding_c
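# Illustrative worked example (not part of the original module): for a 4-D input
# and a PyTorch pad of [1, 2] (pad only the last dim: 1 at the start, 2 at the
# end), the helper above is expected to produce the ONNX Pad input
#     [0, 0, 0, 1, 0, 0, 0, 2]
# i.e. begin paddings for dims (N, C, H, W) followed by the end paddings.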
def constant_pad_nd(g, input, padding, value=None):
mode = "constant"
value = symbolic_helper._maybe_get_scalar(value)
value = symbolic_helper._if_scalar_type_as(g, value, input)
pad = _prepare_onnx_paddings(g, input, padding)
return g.op("Pad", input, pad, value, mode_s=mode)
def reflection_pad(g, input, padding):
mode = "reflect"
paddings = _prepare_onnx_paddings(g, input, padding)
return g.op("Pad", input, paddings, mode_s=mode)
def replication_pad(g, input, padding):
mode = "edge"
paddings = _prepare_onnx_paddings(g, input, padding)
return g.op("Pad", input, paddings, mode_s=mode)
reflection_pad1d = reflection_pad
reflection_pad2d = reflection_pad
reflection_pad3d = reflection_pad
replication_pad1d = replication_pad
replication_pad2d = replication_pad
replication_pad3d = replication_pad
def pad(g, input, pad, mode, value):
mode = symbolic_helper._parse_arg(mode, "s")
if mode == "replicate":
return replication_pad(g, input, pad)
elif mode == "reflect":
return reflection_pad(g, input, pad)
elif mode == "constant":
return constant_pad_nd(g, input, pad, value)
elif mode == "circular":
return opset9._pad_circular(g, input, pad)
else:
raise RuntimeError(f"Unrecognized padding mode {mode}")
def linalg_det(g, self):
return g.op("Det", self)
def logdet(g, input):
return opset9.log(g, linalg_det(g, input))
def arange(g, *args):
def _get_arange_dtype(dtype):
dtype = symbolic_helper._maybe_get_const(dtype, "i")
return dtype
if len(args) == 2 or len(args) == 5:
if len(args) == 2:
# aten::arange(Scalar end, Tensor out)
dtype = None
else:
# aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
dtype = _get_arange_dtype(args[1])
type_, end, start, step = symbolic_helper._arange_cast_helper(
g, end=args[0], dtype=dtype
)
start_default = g.op(
"Constant",
value_t=torch.tensor(0, dtype=type_.dtype()),
)
delta_default = g.op(
"Constant",
value_t=torch.tensor(1, dtype=type_.dtype()),
)
arange_tensor = g.op("Range", start_default, end, delta_default)
elif len(args) == 4 or len(args) == 7:
if len(args) == 4:
# aten::arange(Scalar start, Scalar end, Scalar step, Tensor out)
dtype = None
else:
# aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory)
dtype = _get_arange_dtype(args[3])
_, end, start, step = symbolic_helper._arange_cast_helper(
g, start=args[0], end=args[1], step=args[2], dtype=dtype
)
arange_tensor = g.op("Range", start, end, step)
elif len(args) == 6:
# aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
dtype = _get_arange_dtype(args[2])
type_, end, start, step = symbolic_helper._arange_cast_helper(
g, start=args[0], end=args[1], dtype=dtype
)
delta_default = g.op(
"Constant",
value_t=torch.tensor(1, dtype=type_.dtype()),
)
arange_tensor = g.op("Range", start, end, delta_default)
else:
raise NotImplementedError(
"Unknown aten::arange signature taking " + str(len(args)) + " arguments."
)
return arange_tensor
@symbolic_helper.parse_args("v", "i")
def _dim_arange(g, like, dim):
like_shape = g.op("Shape", like)
stop = g.op(
"Gather", like_shape, g.op("Constant", value_t=torch.tensor(dim)), axis_i=0
)
if symbolic_helper.is_caffe2_aten_fallback():
return g.op("_caffe2::Range", stop)
return arange(g, stop, 4, None, None, None)
def size(g, self, dim=None):
if dim is None:
return g.op("Shape", self)
return symbolic_helper._size_helper(g, self, dim)
def squeeze(g, self, dim=None):
if dim is None:
return g.op("Squeeze", self)
# dim as a tensor
if not symbolic_helper._is_constant(dim):
return symbolic_helper._squeeze_helper(g, self, [dim])
dim = symbolic_helper._get_const(dim, "i", "dim")
input_rank = symbolic_helper._get_tensor_rank(self)
adjusted_dim = dim
if input_rank is not None and dim < 0:
adjusted_dim += input_rank
dim_size = symbolic_helper._get_tensor_dim_size(self, adjusted_dim)
if (dim < 0 and input_rank is None) or dim_size is None:
        # If ONNX shape inference is not on, always export as dynamic,
        # because we cannot tell whether the observed static shape is also static at runtime.
# create "cond" node (condition is shape[i]==1)
dim_constant = g.op("Constant", value_t=torch.tensor([dim]))
size = symbolic_helper._size_helper(g, self, dim_constant)
const_one = g.op("Constant", value_t=torch.ones(1, dtype=torch.int64))
cond = g.op("Equal", size, const_one)
# create the "If" node and add the "then" and "else" blocks to it.
if_node_outputs = g.op("If", cond)
if_node = if_node_outputs.node()
if_block = utils._add_block(if_node)
squeeze_ = symbolic_helper._squeeze_helper(if_block, self, [dim])
utils._add_output_to_block(if_block, squeeze_)
else_block = utils._add_block(if_node)
identity_ = else_block.op("Identity", self)
utils._add_output_to_block(else_block, identity_)
return if_node_outputs
# For static input shape
dim = adjusted_dim
if dim_size > 1:
warnings.warn(
"This model contains a squeeze operation on dimension "
+ str(dim)
+ ". The size of "
+ "this dimension in the given input is "
+ str(dim_size)
+ ". The model will "
+ "be exported without the squeeze node. If the model is intended to be used with dynamic "
+ "input shapes, please export with dynamic_axes argument."
)
return self
return symbolic_helper._squeeze_helper(g, self, [dim])
def unsqueeze(g, self, dim):
if symbolic_helper._is_constant(dim):
dim = symbolic_helper._get_const(dim, "i", "dim")
return symbolic_helper._unsqueeze_helper(g, self, [dim])
def mm(g, self, other):
return g.op("Gemm", self, other, beta_f=0.0, alpha_f=1.0)
def index(g, self, index):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("index", self, index, overload_name="Tensor")
if symbolic_helper._is_packed_list(index):
indices = symbolic_helper._unpack_list(index)
else:
indices = [index]
# Handle single mask index.
if len(indices) == 1:
index = indices[0]
if not symbolic_helper._is_none(index) and (
index.type().scalarType() == "Bool" or index.type().scalarType() == "Byte"
):
index = opset9.nonzero(g, index)
return g.op("GatherND", self, index)
return opset9.index(g, self, index)
def index_fill(g, self, dim, index, value):
dim_value = symbolic_helper._parse_arg(dim, "i")
if symbolic_helper.is_caffe2_aten_fallback():
return g.at(
"index_fill",
self,
index,
value,
overload_name="int_Scalar",
dim_i=dim_value,
)
expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper(
g, self, dim, index
)
value = symbolic_helper._maybe_get_scalar(value)
value = symbolic_helper._if_scalar_type_as(g, value, self)
expanded_value = opset9.expand(g, value, expanded_index_shape, None)
return scatter(g, self, dim, expanded_index, expanded_value)
def index_copy(g, self, dim, index, source):
dim_value = symbolic_helper._parse_arg(dim, "i")
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("index_copy", self, index, source, dim_i=dim_value)
expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper(
g, self, dim, index
)
return scatter(g, self, dim, expanded_index, source)
def __rshift_(g, self, other):
# make sure to cast other to self's type
# (when self is long, make sure that other is not float)
if other.type().scalarType() != self.type().scalarType():
other = g.op(
"Cast",
other,
to_i=_type_utils.JitScalarType.from_name(
self.type().scalarType()
).onnx_type(),
)
if self.type().scalarType() == "Byte":
return g.op("BitShift", self, other, direction_s="RIGHT")
two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32))
# exponent (same type as self) has to be float or double in onnx::Pow
if not symbolic_helper._is_fp(self):
other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT)
two_pow = g.op("Pow", two, other)
two_pow = g.op(
"Cast",
two_pow,
to_i=_type_utils.JitScalarType.from_name(self.type().scalarType()).onnx_type(),
)
rshift = g.op("Div", self, two_pow)
return rshift
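# Illustrative worked example (not part of the original module): for non-Byte
# inputs, `self >> other` is emulated as Div(self, Pow(2, other)) with a cast
# back to self's type, e.g. 20 >> 2 -> Div(20, Pow(2, 2)) = Div(20, 4) = 5.
# Byte tensors use the native BitShift op instead.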
def __lshift_(g, self, other):
# make sure to cast other to self's type
# (when self is long, make sure that other is not float)
if other.type().scalarType() != self.type().scalarType():
other = g.op(
"Cast",
other,
to_i=_type_utils.JitScalarType.from_name(
self.type().scalarType()
).onnx_type(),
)
if self.type().scalarType() == "Byte":
return g.op("BitShift", self, other, direction_s="LEFT")
two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32))
# exponent (same type as self) has to be float or double in onnx::Pow
if not symbolic_helper._is_fp(self):
other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT)
two_pow = g.op("Pow", two, other)
two_pow = g.op(
"Cast",
two_pow,
to_i=_type_utils.JitScalarType.from_name(self.type().scalarType()).onnx_type(),
)
lshift = g.op("Mul", self, two_pow)
return lshift
def _get_im2col_indices_along_dim(
g, input_d, kernel_size_d, dilation_d, padding_d, stride_d
):
# Input is always 4-D (N, C, H, W)
# Calculate indices of sliding blocks along spatial dimension
    # Slide the kernel over the input along each spatial dim d:
    # the block start indices along dim d range from 0 to
    # input[d] + 2*padding[d] - dilation[d]*(kernel_size[d] - 1), in steps of stride[d]
blocks_d = g.op(
"Add", input_d, g.op("Constant", value_t=torch.tensor(padding_d * 2))
)
blocks_d = g.op(
"Sub",
blocks_d,
g.op("Constant", value_t=torch.tensor(dilation_d * (kernel_size_d - 1))),
)
# Stride kernel over input and find starting indices along dim d
blocks_d_indices = g.op(
"Range",
g.op("Constant", value_t=torch.tensor(0)),
blocks_d,
g.op("Constant", value_t=torch.tensor(stride_d)),
)
# Apply dilation on kernel and find its indices along dim d
kernel_grid = torch.arange(0, kernel_size_d * dilation_d, dilation_d)
kernel_grid = g.op("Constant", value_t=kernel_grid.unsqueeze(0))
    # Broadcast and add kernel starting positions (indices) with
# kernel_grid along dim d, to get block indices along dim d
blocks_d_indices = symbolic_helper._unsqueeze_helper(
g, blocks_d_indices, [0]
) # Reshape to [1, -1]
kernel_mask = symbolic_helper._reshape_helper(
g, kernel_grid, g.op("Constant", value_t=torch.tensor([-1, 1]))
)
block_mask = g.op("Add", blocks_d_indices, kernel_mask)
return block_mask
def _get_im2col_padded_input(g, input, padding_h, padding_w):
# Input is always 4-D tensor (N, C, H, W)
# Padding tensor has the following format: (padding_h, padding_w)
# Reshape the padding to follow ONNX format: (dim1_begin, dim2_begin,...,dim1_end, dim2_end,...)
pad = g.op("Constant", value_t=torch.LongTensor([0, 0, padding_h, padding_w] * 2))
return g.op("Pad", input, pad)
def _get_im2col_output_shape(g, input, kernel_h, kernel_w):
batch_dim = size(g, input, g.op("Constant", value_t=torch.tensor(0)))
channel_dim = size(g, input, g.op("Constant", value_t=torch.tensor(1)))
channel_unfolded = g.op(
"Mul", channel_dim, g.op("Constant", value_t=torch.tensor(kernel_h * kernel_w))
)
return g.op(
"Concat",
symbolic_helper._unsqueeze_helper(g, batch_dim, [0]),
symbolic_helper._unsqueeze_helper(g, channel_unfolded, [0]),
g.op("Constant", value_t=torch.tensor([-1])),
axis_i=0,
)
@symbolic_helper.parse_args("v", "is", "is", "is", "is")
def im2col(g, input, kernel_size, dilation, padding, stride):
# Input is always 4-D tensor (N, C, H, W)
# All other args are int[2]
input_h = size(g, input, g.op("Constant", value_t=torch.tensor(2)))
input_w = size(g, input, g.op("Constant", value_t=torch.tensor(3)))
stride_h, stride_w = stride[0], stride[1]
padding_h, padding_w = padding[0], padding[1]
dilation_h, dilation_w = dilation[0], dilation[1]
kernel_h, kernel_w = kernel_size[0], kernel_size[1]
blocks_row_indices = _get_im2col_indices_along_dim(
g, input_h, kernel_h, dilation_h, padding_h, stride_h
)
blocks_col_indices = _get_im2col_indices_along_dim(
g, input_w, kernel_w, dilation_w, padding_w, stride_w
)
output_shape = _get_im2col_output_shape(g, input, kernel_h, kernel_w)
padded_input = _get_im2col_padded_input(g, input, padding_h, padding_w)
# For a 4D matrix of size (1, 1, 3, 3) as below with kernel_size=2, stride=1, and dilation=1
# [[[[1., 2., 3.,],
# [4., 5., 6.,],
# [7., 8., 9.,]]]]
# First gather indices along rows (dim=2) with blocks_row_indices = [[0,1], [1,2]] to get:
# [[[[[1., 2., 3.],
# [4., 5., 6.]],
# [[4., 5., 6.],
# [7., 8., 9.]]]]]
    # And then gather along cols (dim=4) with blocks_col_indices = [[0,1], [1,2]] to get:
# [[[[[[1., 2.],
# [4., 5.]],
# [[2., 3.],
# [5., 6]]],
# [[[4., 5.],
# [7., 8.]],
# [[5., 6.],
# [8., 9.]]]]]]
# Transpose dims 3 (depth) and 4 (rows), and then reshape to output shape (1, 1, 4, 4) to get:
# [[[1., 2., 4., 5.],
# [2., 3., 5., 6.],
# [4., 5., 7., 8.],
# [5., 6., 8., 9.]]]
output = g.op("Gather", padded_input, blocks_row_indices, axis_i=2)
output = g.op("Gather", output, blocks_col_indices, axis_i=4)
output = g.op("Transpose", output, perm_i=[0, 1, 2, 4, 3, 5])
return symbolic_helper._reshape_helper(g, output, output_shape)
def narrow(g, input, dim, start, length):
end = g.op("Add", start, length)
return symbolic_helper._slice_helper(
g, input, axes=dim, starts=start, ends=end, dynamic_slice=True
)
@symbolic_helper.quantized_args(True, False, False)
@symbolic_helper.parse_args("v", "i", "i")
def flatten(g, input, start_dim, end_dim):
dim = symbolic_helper._get_tensor_rank(input)
if dim == 1:
return input
# use ONNX's Flatten operator for cases where the output shape is 2D
if start_dim == 1:
if end_dim == -1 or (dim is not None and end_dim == dim - 1):
return g.op("Flatten", input, axis_i=start_dim)
elif start_dim == 0:
if end_dim == -2 or (dim is not None and end_dim == dim - 2):
return g.op("Flatten", input, axis_i=end_dim + 1)
if dim is None:
return symbolic_helper._unimplemented(
"dim",
"ONNX and PyTorch use different strategies to split the input. "
"Input rank must be known at export time.",
)
# if end_dim is negative add dim
if end_dim < 0:
end_dim = dim + end_dim
return symbolic_helper._flatten_helper(g, input, start_dim, end_dim, dim)
@symbolic_helper.parse_args("v", "f", "is", "i", "v")
def linalg_vector_norm(g, self, ord, dim, keepdim, dtype):
if ord == 0:
if dim is None:
self = symbolic_helper._reshape_helper(
g, self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64))
)
keepdim = 0
cond_op = g.op(
"Not", g.op("Equal", self, g.op("Constant", value_t=torch.LongTensor([0])))
)
cond_op = g.op(
"Cast",
cond_op,
to_i=_type_utils.JitScalarType.from_name(
self.type().scalarType()
).onnx_type(),
)
return symbolic_helper._reducesum_helper(
g, cond_op, axes_i=dim, keepdims_i=keepdim
)
else:
return opset9.linalg_vector_norm(g, self, ord, dim, keepdim, dtype)
@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i")
def embedding_bag(
g,
embedding_matrix,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
):
if scale_grad_by_freq and GLOBALS.export_training:
return symbolic_helper._onnx_unsupported(
"embedding_bag with scale_grad_by_freq for training mode"
)
if padding_idx is not None and padding_idx >= 0:
raise RuntimeError("embedding_bag with padding_idx")
loop_condition = g.op("Constant", value_t=torch.tensor(1))
loop_condition = g.op("Cast", loop_condition, to_i=9)
zero = g.op("Constant", value_t=torch.tensor([0]))
indices_len = symbolic_helper._unsqueeze_helper(
g,
symbolic_helper._size_helper(
g, indices, g.op("Constant", value_t=torch.tensor(0))
),
[0],
)
if not include_last_offset:
offsets = [offsets, indices_len]
offsets = g.op("Concat", *offsets, axis_i=0)
# Offsets holds the starting index position of each bag. So we create a list of the indices slices (determined by
# offsets) and gather those indices in indices_row. Then we use this subset of indices to gather from embeddings.
# The embeddings output is a loop scan output, so we can avoid creating a sequence and inserting elements in.
offsets_starts = symbolic_helper._slice_helper(
g, offsets, axes=[0], starts=[0], ends=[sys.maxsize], steps=[1]
)
offsets_ends = symbolic_helper._slice_helper(
g, offsets, axes=[0], starts=[1], ends=[sys.maxsize], steps=[1]
)
loop_len = symbolic_helper._size_helper(
g, offsets_ends, g.op("Constant", value_t=torch.tensor(0))
)
loop = g.op("Loop", loop_len, loop_condition)
loop_block = utils._add_block(loop.node())
block_input_iter = utils._add_input_to_block(loop_block)
cond = utils._add_input_to_block(loop_block)
indices_start = loop_block.op("Gather", offsets_starts, block_input_iter, axis_i=0)
indices_end = loop_block.op("Gather", offsets_ends, block_input_iter, axis_i=0)
indices_start = symbolic_helper._unsqueeze_helper(loop_block, indices_start, [0])
indices_end = symbolic_helper._unsqueeze_helper(loop_block, indices_end, [0])
indices_row = loop_block.op("Slice", indices, indices_start, indices_end, zero)
embeddings = loop_block.op("Gather", embedding_matrix, indices_row, axis_i=0)
if not symbolic_helper._is_none(per_sample_weights):
per_sample_weights_row = loop_block.op(
"Slice", per_sample_weights, indices_start, indices_end, zero
)
per_sample_weights_row = symbolic_helper._unsqueeze_helper(
loop_block, per_sample_weights_row, [1]
)
embeddings = loop_block.op("Mul", embeddings, per_sample_weights_row)
if mode == 0:
embeddings = symbolic_helper._reducesum_helper(
loop_block, embeddings, axes_i=[0], keepdims_i=0
)
elif mode == 1:
embeddings = loop_block.op("ReduceMean", embeddings, axes_i=[0], keepdims_i=0)
else:
embeddings = loop_block.op("ReduceMax", embeddings, axes_i=[0], keepdims_i=0)
cond_out = loop_block.op("Cast", loop_condition, to_i=9)
utils._add_output_to_block(loop_block, cond_out)
utils._add_output_to_block(loop_block, embeddings)
# aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices.
# But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag.
return loop.node().output(), None, None, None
@symbolic_helper.parse_args("v", "v", "f", "f")
def embedding_renorm(g, weight, indices, max_norm, norm_type):
unique_indices = g.op("Unique", indices)
partial_weight = g.op("Gather", weight, unique_indices)
norm_type = int(norm_type)
if norm_type == 1:
norm_type = "ReduceL1"
elif norm_type == 2:
norm_type = "ReduceL2"
else:
raise RuntimeError(
f"Unsupported: ONNX export of embedding_renorm with norm: {norm_type}. "
"Only 1. and 2. are supported."
)
partial_weight_norm = g.op(norm_type, partial_weight, axes_i=[1], keepdims_i=1)
# https://github.com/pytorch/pytorch/blob/0a07488ed2c47765e337e290bd138c0e6e459cbd/aten/src/ATen/native/Embedding.cpp#L177
# Add 1e-7 to prevent division by zero.
partial_weight_norm_ = g.op(
"Add", partial_weight_norm, g.op("Constant", value_t=torch.tensor(1e-7))
)
max_norm = torch.tensor(max_norm)
scales = g.op("Div", max_norm, partial_weight_norm_)
partial_weight_renorm = g.op("Mul", partial_weight, scales)
partial_weight_renorm = g.op(
"Where",
g.op("Greater", partial_weight_norm, max_norm),
partial_weight_renorm,
partial_weight,
)
return g.op(
"ScatterND",
weight,
symbolic_helper._unsqueeze_helper(g, unique_indices, [1]),
partial_weight_renorm,
)
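# Numeric illustration of the renorm above (not exporter code): with max_norm=1.0 and
# norm_type=2, a gathered row whose L2 norm is 5.0 gets scale = 1.0 / (5.0 + 1e-7) ≈ 0.2,
# while rows whose norm is already <= max_norm pass through unchanged via the Where node.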
def chunk(g, self, chunks, dim):
# Calculate chunk size for dynamic chunk
dim_size = g.op("Gather", g.op("Shape", self), dim, axis_i=0)
chunk_size_s = g.op(
"Sub", chunks, g.op("Constant", value_t=torch.tensor([1], dtype=torch.long))
)
chunk_size = g.op("Div", g.op("Add", dim_size, chunk_size_s), chunks)
# Create splits vector
chunk_vec = [
opset9.expand(g, chunk_size, chunk_size_s, None),
g.op("Sub", dim_size, g.op("Mul", chunk_size, chunk_size_s)),
]
chunk_vec = g.op("Concat", *chunk_vec, axis_i=0)
return split(g, self, chunk_vec, dim)
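# Worked example of the size arithmetic above: with dim_size=10 and chunks=3,
# chunk_size = (10 + (3 - 1)) // 3 = 4 and the splits are [4, 4, 10 - 4 * 2] = [4, 4, 2],
# matching the section sizes produced by torch.chunk(torch.arange(10), 3).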
def normal(g, loc, scale, seed):
# If you can sample from a given distribution with mean 0 and variance 1, then you can easily sample from a
# scale-location transformation of that distribution, which has mean μ and variance σ². If x is a sample
# from a mean 0 and variance 1 distribution, then
# σx + μ
# is a sample with mean μ and variance σ².
result = opset9.mul(g, scale, g.op("RandomNormalLike", loc))
return add(g, result, loc)
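# A minimal eager-mode illustration of the same transformation (shapes are illustrative only):
#
#     loc, scale = torch.full((3,), 2.0), torch.full((3,), 0.5)
#     sample = scale * torch.randn_like(loc) + loc    # mean ≈ 2.0, std ≈ 0.5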
class Prim:
domain = "prim"
@staticmethod
def ConstantChunk(g, self, chunks, dim):
input_shape = g.op("Shape", self)
axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
input_shape_dim = g.op("Gather", input_shape, axis, axis_i=0)
start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
chunk_size = g.op("Constant", value_t=torch.tensor([chunks], dtype=torch.long))
chunk_size_minus_1 = g.op(
"Constant", value_t=torch.tensor([chunks - 1], dtype=torch.long)
)
input_shape_dim_shift = g.op("Add", input_shape_dim, chunk_size_minus_1)
chunk_dim = g.op("Div", input_shape_dim_shift, chunk_size)
res = []
for i in range(chunks):
index = g.op("Constant", value_t=torch.tensor([i + 1], dtype=torch.long))
end = g.op("Mul", chunk_dim, index)
res.append(g.op("Slice", self, start, end, axis))
start = end
return res
| pytorch-master | torch/onnx/symbolic_opset11.py |
"""Functions to verify exported ONNX model is functionally equivalent to original PyTorch model.
ONNX Runtime is required, and is used as the ONNX backend for export verification.
"""
from __future__ import annotations
import contextlib
import copy
import difflib
import io
import itertools
import os
import tempfile
import warnings
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch._C._onnx as _C_onnx
from torch import _C
from torch.onnx import _constants, _experimental, utils
from torch.onnx._globals import GLOBALS
_ORT_PROVIDERS = ("CPUExecutionProvider",)
def _flatten_tuples(elem):
flattened = []
for t in elem:
if isinstance(t, tuple):
flattened.extend(_flatten_tuples(t))
else:
flattened.append(t)
return flattened
def _to_numpy(elem):
if isinstance(elem, torch.Tensor):
if elem.requires_grad:
return elem.detach().cpu().numpy()
else:
return elem.cpu().numpy()
elif isinstance(elem, (list, tuple)):
return [_to_numpy(inp) for inp in elem]
elif isinstance(elem, (bool, int, float)):
return np.array(elem)
elif isinstance(elem, dict):
flattened = []
for k in elem:
flattened += [_to_numpy(k)] + [_to_numpy(elem[k])]
return flattened
return elem
def _inline_flatten_list(inputs, res_list):
for i in inputs:
res_list.append(i) if not isinstance(
i, (list, tuple)
) else _inline_flatten_list(i, res_list)
return res_list
def _unpack_to_numpy(values, cast_onnx_accepted=True):
value_unpacked = []
for value in values:
value_unpacked.extend(
utils.unpack_quantized_tensor(value, cast_onnx_accepted=cast_onnx_accepted)
)
return [_to_numpy(v) for v in value_unpacked]
def _run_ort(ort_session, inputs):
kw_inputs = {}
if inputs and isinstance(inputs[-1], dict):
kw_inputs = inputs[-1]
inputs = inputs[:-1]
inputs = _unpack_to_numpy(_flatten_tuples(inputs))
ort_inputs = {}
for input_name, input in kw_inputs.items():
ort_inputs[input_name] = _to_numpy(input)
inputs = _to_numpy(inputs)
ort_session_inputs = ort_session.get_inputs()
for i, input in enumerate(inputs):
if i == len(ort_session_inputs) or ort_session_inputs[i].name in ort_inputs:
raise ValueError(
f"got too many positional inputs. inputs: {inputs}. kw_inputs: {kw_inputs}"
)
ort_inputs[ort_session_inputs[i].name] = input
ort_outs = ort_session.run(None, ort_inputs)
return _inline_flatten_list(ort_outs, [])
def _ort_session(
model: Union[str, io.BytesIO], ort_providers: Sequence[str] = _ORT_PROVIDERS
):
try:
import onnxruntime # type: ignore[import]
except ImportError:
raise ImportError("onnxruntime is required for export verification.")
if ort_providers is None:
ort_providers = _ORT_PROVIDERS
session_options = onnxruntime.SessionOptions()
# suppress ort warnings.
# 0:Verbose, 1:Info, 2:Warning, 3:Error, 4:Fatal. Default is 2.
session_options.log_severity_level = 3
ort_session = onnxruntime.InferenceSession(
model if isinstance(model, str) else model.getvalue(),
session_options,
providers=ort_providers,
)
return ort_session
def _compare_ort_pytorch_outputs(
ort_outs: Sequence[np.ndarray],
pt_outs: Sequence[torch.Tensor],
rtol: float,
atol: float,
check_shape: bool,
check_dtype: bool,
acceptable_error_percentage: Optional[float],
):
"""
Compare ONNX Runtime and PyTorch outputs.
Args:
ort_outs: outputs from ONNX Runtime.
pt_outs: outputs from PyTorch.
rtol (float, optional): relative tolerance in comparison between ONNX and PyTorch outputs.
atol (float, optional): absolute tolerance in comparison between ONNX and PyTorch outputs.
acceptable_error_percentage (float, optional): acceptable percentage of element mismatches in comparison.
It should be a float between 0.0 and 1.0.
Raises:
AssertionError: if outputs from ONNX model and PyTorch model are not
equal up to specified precision.
ValueError: if arguments provided are invalid.
"""
pt_outs, _ = torch.jit._flatten(pt_outs)
pt_outs = _unpack_to_numpy(pt_outs, cast_onnx_accepted=False)
assert len(ort_outs) == len(
pt_outs
), f"Number of outputs differ ONNX runtime: ({len(ort_outs)}) PyTorch: ({len(pt_outs)})"
if acceptable_error_percentage and (
acceptable_error_percentage > 1.0 or acceptable_error_percentage < 0.0
):
raise ValueError(
"If set, acceptable_error_percentage should be between 0.0 and 1.0"
)
for ort_out, pt_out in zip(ort_outs, pt_outs):
try:
# TODO: Remove `check_shape` option once every shape inconsistent issue is addressed.
if not check_shape:
# Allow different but broadcastable output shapes.
ort_out, pt_out = np.broadcast_arrays(ort_out, pt_out)
torch.testing.assert_close(
ort_out,
pt_out,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
equal_nan=True,
)
except AssertionError as e:
if acceptable_error_percentage:
error_percentage = 1 - np.sum(
np.isclose(ort_out, pt_out, rtol=rtol, atol=atol)
) / np.prod(ort_out.shape)
if error_percentage <= acceptable_error_percentage:
warnings.warn(
f"Suppressed AssertionError:\n{e}.\n"
f"Error percentage {error_percentage} "
f"within acceptable range {acceptable_error_percentage}."
)
continue
raise
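# Illustration of the tolerance fallback above: if 3 of 1000 elements fail np.isclose, the
# error percentage is 1 - 997 / 1000 = 0.003, so the AssertionError is suppressed whenever
# acceptable_error_percentage >= 0.003 and re-raised otherwise.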
def _prepare_input_for_pytorch(args, kwargs):
"""Prepare input for PyTorch model execution.
Any future changes/formatting to the input before dispatching to the PyTorch
model should be made in this function.
Args:
args: positional arguments for PyTorch model forward method.
kwargs: keyword arguments for PyTorch model forward method.
Returns:
args: positional arguments for PyTorch model forward method.
kwargs: keyword arguments for PyTorch model forward method.
"""
if isinstance(args, (torch.Tensor, dict)):
args = (args,)
# In-place operators will update input tensor data as well.
# Thus inputs are replicated before every forward call.
args = copy.deepcopy(args)
if kwargs:
kwargs = copy.deepcopy(kwargs)
else:
kwargs = {}
return args, kwargs
def _prepare_input_for_export(args, kwargs):
"""Prepare input for ONNX model export.
Any future changes/formatting to the input before dispatching to the
:func:`torch.onnx.export` api should be made in this function.
Args:
args: positional arguments for PyTorch model forward method.
kwargs: keyword arguments for PyTorch model forward method.
Returns:
onnx_inputs: positional arguments for ONNX model export, as `args` in
:func:`torch.onnx.export`.
"""
args, kwargs = _prepare_input_for_pytorch(args, kwargs)
if not kwargs and isinstance(args[-1], dict):
onnx_inputs = args + ({},)
elif kwargs:
onnx_inputs = args + (kwargs,)
else:
onnx_inputs = args
return onnx_inputs
def _prepare_input_for_ort(args, kwargs, remained_onnx_input_idx, flatten):
"""Prepare input for ONNX model execution in ONNX Runtime.
Any future changes/formatting to the input before dispatching to the ONNX Runtime
InferenceSession run should be made in this function.
Args:
args: positional arguments for PyTorch model forward method.
kwargs: keyword arguments for PyTorch model forward method.
Returns:
onnx_inputs: positional arguments for ONNX model execution in ONNX Runtime.
"""
onnx_inputs = _prepare_input_for_export(args, kwargs)
if flatten:
onnx_inputs, _ = torch.jit._flatten(onnx_inputs)
elif onnx_inputs and onnx_inputs[-1] == {}:
# Handle empty kwargs (normally removed by flatten).
onnx_inputs = onnx_inputs[:-1]
if remained_onnx_input_idx is not None:
return [onnx_inputs[i] for i in remained_onnx_input_idx]
else:
return onnx_inputs
def _try_clone_model(model):
"""Used for preserving original model in case forward mutates model states."""
try:
return copy.deepcopy(model)
except Exception:
warnings.warn(
"Failed to clone model. Model state might be mutated during verification."
)
return model
def _compare_ort_pytorch_model(
model,
ort_session,
input_args,
input_kwargs,
additional_test_inputs,
remained_onnx_input_idx,
flatten,
rtol,
atol,
check_shape,
check_dtype,
acceptable_error_percentage: Optional[float],
):
"""Compare outputs from ONNX model runs with outputs from PyTorch model runs.
ONNX Runtime is used for model execution backend for ONNX model.
Raises:
AssertionError: if outputs from ONNX model and PyTorch model are not
equal up to specified precision.
"""
def compare_ort_pytorch_model_with_input(input_args, input_kwargs):
pt_args, pt_kwargs = _prepare_input_for_pytorch(input_args, input_kwargs)
# TODO: remove this and treat mutating model separately. See #77679
model_copy = _try_clone_model(model)
pt_outs = model_copy(*pt_args, **pt_kwargs)
ort_inputs = _prepare_input_for_ort(
input_args, input_kwargs, remained_onnx_input_idx, flatten
)
ort_outs = _run_ort(ort_session, ort_inputs)
_compare_ort_pytorch_outputs(
ort_outs,
pt_outs,
rtol,
atol,
check_shape,
check_dtype,
acceptable_error_percentage,
)
compare_ort_pytorch_model_with_input(input_args, input_kwargs)
if additional_test_inputs:
for test_input_args in additional_test_inputs:
compare_ort_pytorch_model_with_input(test_input_args, {})
class _GraphDiff:
"""A class to represent the difference between two graphs."""
def __init__(self, graph_a: _C.Graph, graph_b: _C.Graph):
"""Construct a _GraphDiff object.
Args:
graph_a (_C.Graph): First graph to compare.
graph_b (_C.Graph): Second graph to compare.
"""
self.graph_a = graph_a
self.graph_b = graph_b
def __str__(self):
"""See function :func:`diff_report`."""
return self.diff_report()
def _indent(self, lines: str) -> str:
return "\n".join(["\t" + line for line in lines.splitlines()])
def diff_report(self) -> str:
"""Return a string representation of the graph difference.
The report shows the first pair of nodes that diverges. It also shows the source
location of the pair of nodes.
Returns:
graph_diff_report (str): A string representation of the graph difference.
"""
graph_a = self.graph_a
graph_b = self.graph_b
graph_a_str = str(graph_a)
graph_b_str = str(graph_b)
if graph_a_str == graph_b_str:
return ""
graph_diff = difflib.ndiff(
graph_a_str.splitlines(True), graph_b_str.splitlines(True)
)
graph_diff_report = ["Graph diff:", self._indent("".join(graph_diff))]
for node_a, node_b in itertools.zip_longest(graph_a.nodes(), graph_b.nodes()):
if str(node_a) != str(node_b):
graph_diff_report.append("First diverging operator:")
node_diff = difflib.ndiff(
str(node_a).splitlines(True), str(node_b).splitlines(True)
)
source_printout = ["node diff:", self._indent("".join(node_diff))]
stack_a = node_a.sourceRange() if node_a else None
if stack_a:
source_printout.extend(
["Former source location:", self._indent(str(stack_a))]
)
stack_b = node_b.sourceRange() if node_b else None
if stack_b:
source_printout.extend(
["Latter source location:", self._indent(str(stack_b))]
)
graph_diff_report.extend(source_printout)
break
return "\n".join(graph_diff_report)
def _check_graph_diff(
model: Union[torch.nn.Module, torch.jit.ScriptModule],
test_input_groups: Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]],
export_options: _experimental.ExportOptions,
model_to_graph_func: Callable[
[
torch.nn.Module,
Tuple[Any, ...],
Mapping[str, Any],
_experimental.ExportOptions,
],
_C.Graph,
],
) -> str:
"""Check if graph produced by `model_to_graph_func` is the same across `test_input_groups`.
Args:
model: See :func:`check_export_model_diff`.
test_input_groups: See :func:`check_export_model_diff`.
export_options: See :func:`check_export_model_diff`.
model_to_graph_func: A function to convert a PyTorch model to a JIT IR graph.
Returns:
graph_diff_report (str): A string representation of the graph difference.
"""
if len(test_input_groups) < 2:
raise ValueError("Need at least two groups of test inputs to compare.")
ref_jit_graph = None
for args, kwargs in test_input_groups:
jit_graph = model_to_graph_func(model, args, kwargs, export_options)
if ref_jit_graph is None:
ref_jit_graph = jit_graph
continue
graph_diff_report = _GraphDiff(ref_jit_graph, jit_graph).diff_report()
if graph_diff_report:
return graph_diff_report
return ""
def _traced_graph_from_model(
model: Union[torch.nn.Module, torch.jit.ScriptModule],
args: Tuple[Any, ...],
kwargs: Mapping[str, Any],
export_options: _experimental.ExportOptions,
) -> _C.Graph:
"""As part of the ONNX export steps, create a traced JIT graph from a PyTorch model.
Args:
model: See :func:`check_export_model_diff`.
args: See :func:`check_export_model_diff`.
kwargs: See :func:`check_export_model_diff`.
export_options: See :func:`check_export_model_diff`.
Returns:
jit_graph (_C.Graph): A traced JIT graph.
"""
training = export_options.training
verbose = export_options.verbose
with utils.exporter_context(model, training, verbose):
export_inputs = _prepare_input_for_export(args, kwargs)
model = utils._pre_trace_quant_model(model, export_inputs)
jit_graph, _, _, _ = utils._create_jit_graph(model, export_inputs)
return jit_graph
def _onnx_graph_from_model(
model: Union[torch.nn.Module, torch.jit.ScriptModule],
args: Tuple[Any, ...],
kwargs: Mapping[str, Any],
export_options: _experimental.ExportOptions,
) -> _C.Graph:
"""As part of the ONNX export steps, export an ONNX JIT graph from a PyTorch model.
Args:
model: See :func:`check_export_model_diff`.
args: See :func:`check_export_model_diff`.
kwargs: See :func:`check_export_model_diff`.
export_options: See :func:`check_export_model_diff`.
Returns:
onnx_graph (_C.Graph): An ONNX JIT graph.
"""
# TODO: refactor utils.py to remove duplicated code of context setup. See #78834
opset_version = export_options.opset_version
operator_export_type = export_options.operator_export_type
export_modules_as_functions = export_options.export_modules_as_functions
training = export_options.training
verbose = export_options.verbose
dynamic_axes = export_options.dynamic_axes
input_names = export_options.input_names
output_names = export_options.output_names
if opset_version is None:
opset_version = _constants.onnx_default_opset
export_modules_as_functions = utils._setup_trace_module_map(
model, export_modules_as_functions
)
if not operator_export_type:
if _C_onnx._CAFFE2_ATEN_FALLBACK:
operator_export_type = _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
else:
operator_export_type = _C_onnx.OperatorExportTypes.ONNX
GLOBALS.export_onnx_opset_version = opset_version
GLOBALS.operator_export_type = operator_export_type
with utils.exporter_context(model, training, verbose):
do_constant_folding = utils._decide_constant_folding(
export_options.do_constant_folding, operator_export_type, training
)
if dynamic_axes is None:
dynamic_axes = {}
utils._validate_dynamic_axes(dynamic_axes, model, input_names, output_names)
export_inputs = _prepare_input_for_export(args, kwargs)
export_inputs = utils._decide_input_format(model, export_inputs)
onnx_graph, _, _ = utils._model_to_graph(
model,
export_inputs,
verbose,
input_names,
output_names,
operator_export_type,
do_constant_folding,
training=training,
dynamic_axes=dynamic_axes,
)
return onnx_graph
def check_export_model_diff(
model: Union[torch.nn.Module, torch.jit.ScriptModule],
test_input_groups: Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]],
export_options: Optional[_experimental.ExportOptions] = None,
) -> str:
"""Verify exported model discrepancy between different groups of inputs.
A graph is exported for each group of inputs. The exported graphs are then compared
to each other, and discrepancies of first pair of nodes are reported. This function
first checks the jit graph. If no discrepancies were found, it then checks the onnx
graph.
Unless otherwise specified, the jit/ONNX graph is expected to be the same, regardless
of the inputs used for exporting. A discrepancy implies the graph exported is
not accurate when run on other groups of inputs, which will typically result in
runtime errors or mismatched outputs.
Args:
model (torch.nn.Module or torch.jit.ScriptModule): The model to be exported.
test_input_groups (Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]]): A sequence
of input groups to be used to export the model. Each input group is a pair of
(args, kwargs).
export_options (_experimental.ExportOptions, optional): An _experimental.ExportOptions
object that controls the export behavior.
Returns:
str: A string containing the diff of the exported models.
"""
export_options = (
_experimental.ExportOptions() if export_options is None else export_options
)
jit_diff_report = _check_graph_diff(
model, test_input_groups, export_options, _traced_graph_from_model
)
if jit_diff_report:
return jit_diff_report
return _check_graph_diff(
model, test_input_groups, export_options, _onnx_graph_from_model
)
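# A minimal usage sketch (hypothetical model and input groups, not part of this module):
#
#     model = torch.nn.Linear(3, 4)
#     groups = [((torch.randn(1, 3),), {}), ((torch.randn(5, 3),), {})]
#     report = check_export_model_diff(model, groups)
#     assert report == "", report   # an empty report means the exported graphs agree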
def verify(
model: Union[torch.nn.Module, torch.jit.ScriptModule],
input_args: Tuple[Any, ...],
input_kwargs: Optional[Mapping[str, Any]] = None,
do_constant_folding: bool = True,
dynamic_axes: Optional[
Mapping[str, Union[Mapping[int, str], Mapping[str, Sequence[int]]]]
] = None,
input_names: Optional[Sequence[str]] = None,
output_names: Optional[Sequence[str]] = None,
training: torch.onnx.TrainingMode = torch.onnx.TrainingMode.EVAL,
opset_version: Optional[int] = None,
keep_initializers_as_inputs: bool = True,
verbose: bool = False,
fixed_batch_size: bool = False,
use_external_data: bool = False,
additional_test_inputs: Optional[Sequence[Tuple[Any, ...]]] = None,
remained_onnx_input_idx: Optional[Sequence[int]] = None,
flatten: bool = True,
check_shape: bool = True,
check_dtype: bool = True,
ort_providers: Sequence[str] = _ORT_PROVIDERS,
rtol: float = 0.001,
atol: float = 1e-7,
acceptable_error_percentage: Optional[float] = None,
**_,
):
"""Verify model export to ONNX with ONNX Runtime.
Args:
model (torch.nn.Module or torch.jit.ScriptModule): See :func:`torch.onnx.export`.
input_args (tuple): See :func:`torch.onnx.export`.
input_kwargs (dict): See :func:`torch.onnx.export`.
do_constant_folding (bool, optional): See :func:`torch.onnx.export`.
dynamic_axes (dict, optional): See :func:`torch.onnx.export`.
input_names (list, optional): See :func:`torch.onnx.export`.
output_names (list, optional): See :func:`torch.onnx.export`.
training (torch.onnx.TrainingMode): See :func:`torch.onnx.export`.
opset_version (int, optional): See :func:`torch.onnx.export`.
keep_initializers_as_inputs (bool, optional): See :func:`torch.onnx.export`.
verbose (bool, optional): See :func:`torch.onnx.export`.
fixed_batch_size (bool, optional): Legacy argument, used only by rnn test cases.
use_external_data (bool, optional): Explicitly specify whether to export the
model with external data.
additional_test_inputs (list, optional): List of tuples. Each tuple is a group of
input arguments to test. Currently only *args are supported.
remained_onnx_input_idx (list, optional): If provided, only the specified inputs
will be passed to the ONNX model. Supply a list when there are unused inputs
in the model. Since unused inputs will be removed in the exported ONNX
model, supplying all inputs will cause an error on unexpected inputs.
This parameter tells the verifier which inputs to pass into the ONNX model.
flatten (bool, optional): Default True. If True, unpack nested list/tuple/dict
inputs into a flattened list of Tensors for ONNX. Set this to False if nested
structures are to be preserved for ONNX, which is usually the case with
exporting ScriptModules.
check_shape (bool, optional): Default True. If True, check the shapes between
PyTorch and ONNX Runtime outputs are exactly the same. Set this to False to allow
output shape broadcasting.
check_dtype (bool, optional): Default True. If True, check the dtypes between
PyTorch and ONNX Runtime outputs are consistent.
ort_providers (sequence, optional): ONNX Runtime providers to use.
rtol (float, optional): relative tolerance in comparison between ONNX and PyTorch outputs.
atol (float, optional): absolute tolerance in comparison between ONNX and PyTorch outputs.
acceptable_error_percentage (float, optional): acceptable percentage of element mismatches in comparison.
It should be a float between 0.0 and 1.0.
Raises:
AssertionError: if outputs from ONNX model and PyTorch model are not
equal up to specified precision.
ValueError: if arguments provided are invalid.
"""
if training == torch.onnx.TrainingMode.TRAINING:
model.train()
elif training == torch.onnx.TrainingMode.EVAL:
model.eval()
with torch.no_grad(), contextlib.ExitStack() as stack:
model_f: Union[str, io.BytesIO] = io.BytesIO()
if use_external_data:
tmpdir_path = stack.enter_context(tempfile.TemporaryDirectory())
model_f = os.path.join(tmpdir_path, "model.onnx")
inputs_for_export = _prepare_input_for_export(input_args, input_kwargs)
# TODO(#77679): remove this and treat mutating model separately.
model_copy = _try_clone_model(model)
utils._export(
model,
inputs_for_export,
model_f,
opset_version=opset_version,
do_constant_folding=do_constant_folding,
keep_initializers_as_inputs=keep_initializers_as_inputs,
dynamic_axes=dynamic_axes,
input_names=input_names,
output_names=output_names,
fixed_batch_size=fixed_batch_size,
training=training,
verbose=verbose,
)
ort_session = _ort_session(model_f, ort_providers)
_compare_ort_pytorch_model(
model_copy,
ort_session,
input_args,
input_kwargs,
additional_test_inputs,
remained_onnx_input_idx,
flatten,
rtol,
atol,
check_shape,
check_dtype,
acceptable_error_percentage,
)
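# A minimal usage sketch (hypothetical module and shapes; onnxruntime must be installed):
#
#     model = torch.nn.Sequential(torch.nn.Linear(8, 4), torch.nn.ReLU())
#     verify(model, (torch.randn(2, 8),), rtol=1e-3, atol=1e-5)   # raises AssertionError on mismatch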
| pytorch-master | torch/onnx/verification.py |
import sys
import warnings
from typing import Sequence
import torch
import torch._C._onnx as _C_onnx
import torch.onnx
from torch import _C
# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics
from torch.onnx import ( # noqa: F401
_patch_torch,
_type_utils,
symbolic_helper,
symbolic_opset9 as opset9,
)
from torch.onnx._globals import GLOBALS
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
# This file exports ONNX ops for opset 10
# Opset 10 is supported by ONNX release 1.5.0
# release on 04/24/19
__all__ = [
"avg_pool1d",
"avg_pool2d",
"avg_pool3d",
"dequantize",
"div",
"embedding_bag",
"fake_quantize_per_tensor_affine",
"flip",
"fmod",
"isfinite",
"isinf",
"max_pool1d_with_indices",
"max_pool1d",
"max_pool2d_with_indices",
"max_pool2d",
"max_pool3d_with_indices",
"max_pool3d",
"nan_to_num",
"quantize_per_tensor",
"Quantized",
"slice",
"sort",
"topk",
"upsample_bilinear2d",
"upsample_linear1d",
"upsample_nearest1d",
"upsample_nearest2d",
"upsample_nearest3d",
"upsample_trilinear3d",
]
def div(g, self, other, *args):
if len(args) == 0:
return opset9.true_divide(g, self, other)
else:
return _div_rounding_mode(g, self, other, *args)
@symbolic_helper.parse_args("v", "v", "s")
def _div_rounding_mode(g, self, other, rounding_mode):
if rounding_mode == "floor":
return _floor_divide(g, self, other)
else:
return opset9._div_rounding_mode(g, self, other, rounding_mode)
def _floor_divide(g, self, other):
if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other):
out = opset9.true_divide(g, self, other)
return g.op("Floor", out)
else:
# Integer division does truncation rounding
div = g.op("Div", self, other)
# Division is negative if: self < 0 != other < 0
zero = g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64))
negative = g.op("Xor", g.op("Less", self, zero), g.op("Less", other, zero))
# For negative numbers with self % other != 0, subtract 1 to round down instead of up
mod = g.op("Mod", self, other, fmod_i=0)
fixup_mask = g.op("And", negative, g.op("Not", g.op("Equal", mod, zero)))
one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
fixup = g.op("Sub", div, one)
return g.op("Where", fixup_mask, fixup, div)
@symbolic_helper.parse_args("v", "i", "i", "none")
def sort(g, self, dim, decending, out=None):
return symbolic_helper._sort_helper(g, self, dim, decending=decending, out=out)
@symbolic_helper.parse_args("v", "v", "i", "i", "i", "none")
def topk(g, self, k, dim, largest, sorted, out=None):
return symbolic_helper._topk_helper(
g, self, k, dim, largest=largest, sorted=sorted, out=out
)
def _max_pool(name, tuple_fn, ndims, return_indices):
@symbolic_helper.quantized_args(True, False, False, False, False, False)
@symbolic_helper.parse_args("v", "is", "is", "is", "is", "i")
def symbolic_fn(g, input, kernel_size, stride, padding, dilation, ceil_mode):
if not stride:
stride = kernel_size
kwargs = {
"kernel_shape_i": tuple_fn(kernel_size),
"pads_i": tuple_fn(padding) * 2,
"strides_i": tuple_fn(stride),
"ceil_mode_i": ceil_mode,
}
if set(tuple_fn(dilation)) != {1}:
kwargs["dilations_i"] = tuple_fn(dilation)
# easy but hacky way to get flattened indices values
# to be used to convert the indices values to non-flattened.
# In ONNX the indices are computed as a flatten 1-D tensor,
# so the values in indices are in [0, N x C x D1 x ... x Dn).
# To convert the indices to the same format used by Pytorch,
# we first execute a maxpool with a kernel and stride of 1 on the same input.
# This will result in a tensor of indices in which each index will have its own value.
# Using this tensor as a reference, we extract the first index of each axis and subtract
# it from each index of this axis in the indices to convert.
# This step will result in a tensor where each dimension has values of indices within
# the dimension it is in.
# For more information :
# https://github.com/pytorch/pytorch/pull/16455#issuecomment-460776407
if return_indices:
r, indices = g.op("MaxPool", input, outputs=2, **kwargs)
_, flattened_indices = g.op(
"MaxPool",
input,
outputs=2,
kernel_shape_i=[1 for _ in range(ndims)],
strides_i=[1 for _ in range(ndims)],
)
# convert indices to have non-flattened indices values
s = symbolic_helper._slice_helper(
g,
flattened_indices,
axes=[2 + i for i in range(ndims)],
starts=tuple_fn(0),
ends=tuple_fn(1),
)
indices = opset9.sub(g, indices, s)
return r, indices
else:
r = g.op("MaxPool", input, outputs=1, **kwargs)
return r
return symbolic_fn
max_pool1d = _max_pool(
"max_pool1d", torch.nn.modules.utils._single, 1, return_indices=False
)
max_pool2d = _max_pool(
"max_pool2d", torch.nn.modules.utils._pair, 2, return_indices=False
)
max_pool3d = _max_pool(
"max_pool3d", torch.nn.modules.utils._triple, 3, return_indices=False
)
max_pool1d_with_indices = _max_pool(
"max_pool1d_with_indices", torch.nn.modules.utils._single, 1, return_indices=True
)
max_pool2d_with_indices = _max_pool(
"max_pool2d_with_indices", torch.nn.modules.utils._pair, 2, return_indices=True
)
max_pool3d_with_indices = _max_pool(
"max_pool3d_with_indices", torch.nn.modules.utils._triple, 3, return_indices=True
)
def _avg_pool(name, tuple_fn):
@symbolic_helper.quantized_args(True, False, False, False, False, False, False)
@symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none")
def symbolic_fn(
g,
input: _C.Value,
kernel_size: Sequence[int],
stride: Sequence[int],
padding: Sequence[int],
ceil_mode: int,
count_include_pad: int,
divisor_override=None,
):
if not stride:
stride = kernel_size
padding = symbolic_helper._avgpool_helper(
tuple_fn, padding, kernel_size, stride, divisor_override, name
)
if count_include_pad:
input = opset9.op_with_optional_float_cast(
g,
"Pad",
input,
pads_i=((0,) * 2 + padding) * 2,
mode_s="constant",
value_f=0.0,
opset_before=11,
)
padding = (0,) * len(padding)
output = g.op(
"AveragePool",
input,
kernel_shape_i=tuple_fn(kernel_size),
strides_i=tuple_fn(stride),
pads_i=padding * 2,
ceil_mode_i=ceil_mode,
)
return output
return symbolic_fn
avg_pool1d = _avg_pool("avg_pool1d", torch.nn.modules.utils._single)
avg_pool2d = _avg_pool("avg_pool2d", torch.nn.modules.utils._pair)
avg_pool3d = _avg_pool("avg_pool3d", torch.nn.modules.utils._triple)
def _interpolate(name, dim, interpolate_mode):
@symbolic_helper.quantized_args(True, False, False)
def symbolic_fn(g, input, output_size, *args):
scales, align_corners = symbolic_helper._get_interpolate_attributes(
g, interpolate_mode, args
)
symbolic_helper._interpolate_warning(interpolate_mode)
align_corners = symbolic_helper._maybe_get_scalar(align_corners)
if align_corners:
return symbolic_helper._unimplemented(name, "align_corners == True")
if scales is None:
scales = symbolic_helper._interpolate_size_to_scales(
g, input, output_size, dim
)
return g.op("Resize", input, scales, mode_s=interpolate_mode)
return symbolic_fn
upsample_nearest1d = _interpolate("upsample_nearest1d", 3, "nearest")
upsample_nearest2d = _interpolate("upsample_nearest2d", 4, "nearest")
upsample_nearest3d = _interpolate("upsample_nearest3d", 5, "nearest")
upsample_linear1d = _interpolate("upsample_linear1d", 3, "linear")
upsample_bilinear2d = _interpolate("upsample_bilinear2d", 4, "linear")
upsample_trilinear3d = _interpolate("upsample_trilinear3d", 5, "linear")
def __interpolate(
g, input, size, scale_factor, mode, align_corners, recompute_scale_factor, antialias
):
scales, mode = symbolic_helper._interpolate_get_scales_and_mode(
g, input, size, scale_factor, mode, align_corners
)
return g.op("Resize", input, scales, mode_s=mode)
def _slice(g, input, axes, starts, ends, steps=None, dynamic_slice=False):
if dynamic_slice:
starts = symbolic_helper._unsqueeze_helper(g, starts, [0])
ends = symbolic_helper._unsqueeze_helper(g, ends, [0])
if isinstance(axes, int):
axes = g.op("Constant", value_t=torch.tensor(axes))
axes = symbolic_helper._unsqueeze_helper(g, axes, [0])
else:
assert len(starts) == len(ends)
assert len(starts) == len(axes)
assert steps is None or len(starts) == len(steps)
if (
len(starts) == 1
and starts[0] == 0
and ends[0] == 9223372036854775807
and (steps is None or (len(steps) == 1 and steps[0] == 1))
):
return input
axes = g.op("Constant", value_t=torch.tensor(axes))
starts = g.op("Constant", value_t=torch.tensor(starts))
ends = g.op("Constant", value_t=torch.tensor(ends))
if steps is None:
return g.op("Slice", input, starts, ends, axes)
steps = g.op("Constant", value_t=torch.tensor(steps))
return g.op("Slice", input, starts, ends, axes, steps)
def slice(g, self, *args):
if len(args) == 4:
# aten::slice(Tensor self, int dim, int? start=None, int? end=None, int step=1) -> Tensor
dim, start, end, step = args
elif len(args) == 3:
# aten::slice(t[] l, int? start=None, int? end=None, int step=1) -> t[]
start, end, step = args
dim = 0
else:
raise NotImplementedError("Unknown aten::slice signature")
is_start_none = start.node().kind() == "prim::Constant" and isinstance(
start.type(), _C.NoneType
)
is_end_none = end.node().kind() == "prim::Constant" and isinstance(
end.type(), _C.NoneType
)
is_start_onnx_const = start.node().kind() == "onnx::Constant"
is_end_onnx_const = end.node().kind() == "onnx::Constant"
step = symbolic_helper._parse_arg(step, "i")
if (
(not is_start_none and not is_start_onnx_const)
or (not isinstance(end, int) and not is_end_none and not is_end_onnx_const)
or (not isinstance(dim, int) and dim.node().kind() != "onnx::Constant")
):
dynamic_slice = True
if is_start_none:
start = g.op("Constant", value_t=torch.tensor(0))
if is_end_none:
end = g.op("Constant", value_t=torch.tensor(9223372036854775807))
else:
start = [0 if is_start_none else symbolic_helper._parse_arg(start, "i")]
end = [
9223372036854775807 if is_end_none else symbolic_helper._parse_arg(end, "i")
]
dim = [symbolic_helper._parse_arg(dim, "i")]
dynamic_slice = False
return symbolic_helper._slice_helper(
g,
self,
axes=dim,
starts=start,
ends=end,
steps=[step],
dynamic_slice=dynamic_slice,
)
@symbolic_helper.parse_args("v", "is")
def flip(g, input, dims):
return symbolic_helper._slice_helper(
g,
input,
axes=dims,
starts=[-1] * len(dims),
ends=[-9223372036854775807] * len(dims),
steps=[-1] * len(dims),
)
def fmod(g, input, other):
return g.op("Mod", input, other, fmod_i=1)
@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i")
def embedding_bag(
g,
embedding_matrix,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
):
if scale_grad_by_freq and GLOBALS.export_training:
return symbolic_helper._onnx_unsupported(
"embedding_bag with scale_grad_by_freq for training mode"
)
if padding_idx is not None and padding_idx >= 0:
raise RuntimeError("embedding_bag with padding_idx")
warnings.warn(
"Export of embedding_bag with dynamic input/offsets shape is not supported in opset 10. "
"Please use opset 11 or higher to export model for dynamic input shape.'"
)
offsets_dim_0 = symbolic_helper._get_tensor_dim_size(offsets, 0)
if offsets_dim_0 is not None:
if include_last_offset:
offset_len = offsets_dim_0 - 1
offsets_extended = offsets
else:
offset_len = offsets_dim_0
offsets_extended = [
offsets,
g.op("Constant", value_t=torch.tensor([sys.maxsize])),
]
offsets_extended = g.op("Concat", *offsets_extended, axis_i=0)
list_ = []
for i in range(offset_len):
start_ = symbolic_helper._unsqueeze_helper(
g,
opset9.select(g, offsets_extended, torch.tensor(0), torch.tensor(i)),
[0],
)
end_ = symbolic_helper._unsqueeze_helper(
g,
opset9.select(
g, offsets_extended, torch.tensor(0), torch.tensor(i + 1)
),
[0],
)
axes_ = g.op("Constant", value_t=torch.tensor([0]))
indices_row = g.op("Slice", indices, start_, end_, axes_)
embeddings = g.op("Gather", embedding_matrix, indices_row)
if not symbolic_helper._is_none(per_sample_weights):
per_sample_weights_row = g.op(
"Slice", per_sample_weights, start_, end_, axes_
)
per_sample_weights_row = symbolic_helper._unsqueeze_helper(
g, per_sample_weights_row, [1]
)
embeddings = g.op("Mul", embeddings, per_sample_weights_row)
if mode == 0:
embeddings = symbolic_helper._reducesum_helper(
g, embeddings, axes_i=[0], keepdims_i=0
)
elif mode == 1:
embeddings = g.op("ReduceMean", embeddings, axes_i=[0], keepdims_i=0)
else:
embeddings = g.op("ReduceMax", embeddings, axes_i=[0], keepdims_i=0)
embeddings = symbolic_helper._unsqueeze_helper(g, embeddings, [0])
list_.append(embeddings)
output = g.op("Concat", *list_, axis_i=0)
# aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices.
# But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag.
return output, None, None, None
else:
return symbolic_helper._onnx_unsupported(
"embedding_bag with unknown shape of offsets for opset 10 is not supported. "
"please use opset 11 or higher."
)
@symbolic_helper.parse_args("v", "v", "v", "i", "i")
def fake_quantize_per_tensor_affine(
g, inputs, scale, zero_point, quant_min=-128, quant_max=127
):
# NOTE: (0, 127) is a special case. PyTorch restricts activations to be in the range (0, 127).
# https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422
if (quant_min, quant_max) == (0, 127):
symbolic_helper._onnx_opset_unsupported_detailed(
"fake_quantize_per_tensor_affine",
10,
13,
"Quantize range (0, 127) not supported, requires opset 13 Clip",
)
if (quant_min, quant_max) not in [(0, 255), (-128, 127)]:
raise RuntimeError(
f"For (quant_min, quant_max), ONNX allows only (0, 255) and (-128, 127). "
f"Got ({quant_min}, {quant_max})"
)
scale = symbolic_helper._maybe_get_scalar(scale)
if scale is None:
symbolic_helper._onnx_opset_unsupported_detailed(
"fake_quantize_per_tensor_affine",
10,
13,
"Non-constant scale not supported",
)
scale = scale.float().data # Avoid exporter generating double type
if quant_min == 0:
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8)
else:
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8)
return g.op(
"DequantizeLinear",
g.op("QuantizeLinear", inputs, scale, zero_point),
scale,
zero_point,
)
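# For reference, the eager-mode fake-quantization being mirrored is roughly (a sketch, not
# the exporter path):
#
#     q = torch.clamp(torch.round(x / scale) + zero_point, quant_min, quant_max)
#     y = (q - zero_point) * scale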
def isinf(g, input):
return g.op("IsInf", opset9._cast_Double(g, input, False)) # type: ignore[attr-defined]
def isfinite(g, input):
from torch.onnx.symbolic_opset9 import __not_, __or_
inf_node = isinf(g, input)
nan_node = opset9.isnan(g, input)
return __not_(g, __or_(g, inf_node, nan_node))
def quantize_per_tensor(g, input, scale, zero_point, dtype):
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
# TODO(justinchuby): Extract all the cast ops into a helper function.
zero_point = g.op(
"Cast", zero_point, to_i=_type_utils.JitScalarType(dtype).onnx_type()
)
scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)
return symbolic_helper.quantize_helper(g, input, scale, zero_point)
def dequantize(g, input):
return symbolic_helper.dequantize_helper(g, input)[0]
@symbolic_helper.parse_args("v", "f", "f", "f")
def nan_to_num(g, input, nan, posinf, neginf):
# Cannot create a int type tensor with inf/nan values, so we simply
# return the original tensor
if not symbolic_helper._is_fp(input):
return input
input_dtype = _type_utils.JitScalarType.from_name(input.type().scalarType()).dtype()
if nan is None:
nan = 0.0
nan_cond = opset9.isnan(g, input)
nan_result = g.op(
"Where",
nan_cond,
g.op("Constant", value_t=torch.tensor([nan], dtype=input_dtype)),
input,
)
# For None values of posinf, neginf we use the greatest/lowest finite
# value representable by input’s dtype.
finfo = torch.finfo(input_dtype)
if posinf is None:
posinf = finfo.max
posinf_cond = opset9.logical_and(
g,
isinf(g, nan_result),
opset9.gt(g, nan_result, g.op("Constant", value_t=torch.LongTensor([0]))),
)
nan_posinf_result = g.op(
"Where",
posinf_cond,
g.op("Constant", value_t=torch.tensor([posinf], dtype=input_dtype)),
nan_result,
)
if neginf is None:
neginf = finfo.min
neginf_cond = opset9.logical_and(
g,
isinf(g, nan_posinf_result),
opset9.lt(
g, nan_posinf_result, g.op("Constant", value_t=torch.LongTensor([0]))
),
)
return g.op(
"Where",
neginf_cond,
g.op("Constant", value_t=torch.tensor([neginf], dtype=input_dtype)),
nan_posinf_result,
)
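# Intended semantics on a float32 example with nan/posinf/neginf left as None:
#
#     torch.nan_to_num(torch.tensor([float("nan"), float("inf"), float("-inf"), 1.0]))
#     # -> tensor([0.0000e+00, 3.4028e+38, -3.4028e+38, 1.0000e+00])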
# https://github.com/pytorch/pytorch/wiki/PyTorch-ONNX-exporter#quantized-model-export
class Quantized:
"""
https://github.com/pytorch/pytorch/wiki/PyTorch-ONNX-exporter#quantized-model-export
Support starts from opset 10 because `DequantizeLinear` and `QuantizeLinear` were introduced in opset version 10.
"""
domain = "quantized"
@staticmethod
def linear(g, q_input, q_weight, bias, op_scale, op_zero_point):
input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
q_bias = symbolic_helper.requantize_bias_helper(
g, bias, input_scale, weight_scale
)
bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)
output = opset9.linear(g, input, weight, bias)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
@staticmethod
def add(g, x, y, op_scale, op_zero_point):
x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
y, _, _, _ = symbolic_helper.dequantize_helper(g, y)
output = opset9.add(g, x, y)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
@staticmethod
def add_relu(g, x, y, op_scale, op_zero_point):
x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
y, _, _, _ = symbolic_helper.dequantize_helper(g, y)
output = opset9.add(g, x, y)
output = opset9.relu(g, output)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
@staticmethod
def mul(g, x, y, op_scale, op_zero_point):
x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
y, _, _, _ = symbolic_helper.dequantize_helper(g, y)
output = opset9.mul(g, x, y)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
@staticmethod
def hardswish(g, x, op_scale, op_zero_point):
x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
output = opset9.hardswish(g, x)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
@staticmethod
def conv2d_relu(
g,
q_input,
q_weight,
bias,
stride,
padding,
dilation,
groups,
op_scale,
op_zero_point,
):
input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
q_bias = symbolic_helper.requantize_bias_helper(
g, bias, input_scale, weight_scale
)
bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)
output = opset9.conv2d(
g, input, weight, bias, stride, padding, dilation, groups
)
output = opset9.relu(g, output)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
@staticmethod
def conv2d(
g,
q_input,
q_weight,
bias,
stride,
padding,
dilation,
groups,
op_scale,
op_zero_point,
):
input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
weight, weight_scale, _, _ = symbolic_helper.dequantize_helper(g, q_weight)
q_bias = symbolic_helper.requantize_bias_helper(
g, bias, input_scale, weight_scale
)
bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)
output = opset9.conv2d(
g, input, weight, bias, stride, padding, dilation, groups
)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
@staticmethod
@symbolic_helper.parse_args("v", "i", "v", "v")
def cat(
g,
q_inputs: _C.Value,
dim: int,
op_scale: _C.Value,
op_zero_point: _C.Value,
) -> _C.Value:
unpacked_inputs = symbolic_helper._unpack_list(q_inputs)
dequantized = [
symbolic_helper.dequantize_helper(g, input)[0] for input in unpacked_inputs
]
concatenated = g.op("Concat", *dequantized, axis_i=dim)
return symbolic_helper.quantize_helper(g, concatenated, op_scale, op_zero_point)
| pytorch-master | torch/onnx/symbolic_opset10.py |
"""This file exports ONNX ops for opset 14.
Note [ONNX operators that are added/updated in opset 14]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
New operators:
HardSwish, Trilu
Updated operators:
Reshape
Add, Sub, Mul, Div
GRU, LSTM, RNN
BatchNorm, Cumsum, Relu
"""
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
import torch
from torch.onnx import symbolic_helper
from torch.onnx._globals import GLOBALS
@symbolic_helper.parse_args("v")
def hardswish(g, self):
return g.op("HardSwish", self)
@symbolic_helper.parse_args("v", "i")
def tril(g, self, diagonal, out=None):
k = g.op("Constant", value_t=torch.tensor(diagonal, dtype=torch.int64))
return g.op("Trilu", self, k, upper_i=0)
@symbolic_helper.parse_args("v", "i")
def triu(g, self, diagonal, out=None):
k = g.op("Constant", value_t=torch.tensor(diagonal, dtype=torch.int64))
return g.op("Trilu", self, k, upper_i=1)
@symbolic_helper.parse_args("v", "v")
def reshape(g, self, shape):
# NOTE: Due to bug in ORT https://github.com/microsoft/onnxruntime/issues/10664
# Reshape export cannot utilize the new allowzero attribute introduced in opset 14.
return symbolic_helper._reshape_helper(g, self, shape, allowzero=0)
@symbolic_helper.parse_args("v", "v", "v", "v", "v", "i", "f", "f", "i")
def batch_norm(
g,
input,
weight,
bias,
running_mean,
running_var,
training,
momentum,
eps,
cudnn_enabled,
):
if (
torch.is_autocast_enabled()
and not symbolic_helper.args_have_same_dtype(
[input, weight, bias, running_mean, running_var]
)
and GLOBALS.export_onnx_opset_version < 15
):
return symbolic_helper._onnx_opset_unsupported_detailed(
"BatchNormalization",
14,
15,
"All input tensors must have the same `dtype`."
" Turn off Autocast or export using opset version 15.",
)
symbolic_helper.check_training_mode(training, "batch_norm")
weight, bias, running_mean, running_var = symbolic_helper._batchnorm_helper(
g, input, weight, bias, running_mean, running_var
)
out = g.op(
"BatchNormalization",
input,
weight,
bias,
running_mean,
running_var,
epsilon_f=eps,
momentum_f=1 - momentum,
training_mode_i=0 if not training else 1,
outputs=1 if not training else 3,
)
if not training:
return out
else:
res, new_running_mean, new_running_var = out
new_running_mean.setType(running_mean.type())
new_running_var.setType(running_var.type())
return res
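# Note on `momentum_f=1 - momentum` above: PyTorch updates running statistics as
# running = (1 - momentum) * running + momentum * batch_stat, while ONNX BatchNormalization
# uses running = momentum * running + (1 - momentum) * batch_stat, hence the flip on export.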
class Quantized:
"""
https://github.com/pytorch/pytorch/wiki/PyTorch-ONNX-exporter#quantized-model-export
"""
domain = "quantized"
@staticmethod
def hardswish(g, x, op_scale, op_zero_point):
x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
output = hardswish(g, x)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
| pytorch-master | torch/onnx/symbolic_opset14.py |
"""Globals used internally by the ONNX exporter.
Do not use this module outside of `torch.onnx` and its tests.
Be very judicious when adding any new global variables. Do not create new global
variables unless they are absolutely necessary.
"""
from typing import Optional
import torch._C._onnx as _C_onnx
# This module should only depend on _constants and nothing else in torch.onnx to keep
# dependency direction clean.
from torch.onnx import _constants
class _InternalGlobals:
"""Globals used internally by ONNX exporter.
NOTE: Be very judicious when adding any new variables. Do not create new
global variables unless they are absolutely necessary.
"""
def __init__(self):
self._export_onnx_opset_version = _constants.onnx_default_opset
self._training_mode: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL
self._in_onnx_export: bool = False
# Whether the user's model is training during export
self.export_training: bool = False
self.operator_export_type: Optional[_C_onnx.OperatorExportTypes] = None
self.onnx_shape_inference: bool = True
@property
def training_mode(self):
"""The training mode for the exporter."""
return self._training_mode
@training_mode.setter
def training_mode(self, training_mode: _C_onnx.TrainingMode):
if not isinstance(training_mode, _C_onnx.TrainingMode):
raise TypeError(
"training_mode must be of type 'torch.onnx.TrainingMode'. This is "
"likely a bug in torch.onnx."
)
self._training_mode = training_mode
@property
def export_onnx_opset_version(self) -> int:
"""Opset version used during export."""
return self._export_onnx_opset_version
@export_onnx_opset_version.setter
def export_onnx_opset_version(self, value: int):
supported_versions = [_constants.onnx_main_opset]
supported_versions.extend(_constants.onnx_stable_opsets)
if value not in supported_versions:
raise ValueError(f"Unsupported ONNX opset version: {value}")
self._export_onnx_opset_version = value
@property
def in_onnx_export(self) -> bool:
"""Whether it is in the middle of ONNX export."""
return self._in_onnx_export
@in_onnx_export.setter
def in_onnx_export(self, value: bool):
if type(value) is not bool:
raise TypeError("in_onnx_export must be a boolean")
self._in_onnx_export = value
GLOBALS = _InternalGlobals()
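# A minimal usage sketch (internal to torch.onnx; the values shown are only illustrative):
#
#     from torch.onnx._globals import GLOBALS
#     GLOBALS.export_onnx_opset_version = 13   # raises ValueError for unsupported opsets
#     GLOBALS.in_onnx_export = True            # setter enforces a bool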
| pytorch-master | torch/onnx/_globals.py |
from __future__ import annotations
import functools
import inspect
import sys
import typing
import warnings
from typing import Any, Callable, List, Optional, Sequence, Set, Tuple, Union
from typing_extensions import Literal
import torch
import torch._C._onnx as _C_onnx
from torch import _C
# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics
from torch.onnx import _patch_torch, _type_utils, errors # noqa: F401
from torch.onnx._globals import GLOBALS
# Note [Edit Symbolic Files]
# EDITING THIS FILE AND SYMBOLIC_OPSET<VERSION> FILES? READ THIS FIRST!
#
# - Module-level functions are called to convert the corresponding op in the `aten` domain.
# E.g. symbolic_opset9.foo is called to convert aten::foo.
# Symbolic functions for other domains are staticmethods in classes named after the domain.
# E.g. symbolic_opset9.Prim.ConstantChunk is called to convert prim::ConstantChunk.
# - Parameter names must *exactly* match the names in
# aten/src/ATen/native/native_functions.yaml, because
# dispatch is done with keyword arguments.
# - Looking for inplace ops? They're detected by
# `_jit_pass_onnx_remove_inplace_ops_for_onnx`, and
# transparently dispatched to their non inplace versions in
# "run_symbolic_function". See Note [Export inplace]
#
# ----------------------------------------------------------------------------------
# A note on Tensor types
# ----------------------------------------------------------------------------------
#
# In general, we should avoid depending on the type of Tensor Values contained
# within the trace graph. However, this is sometimes unavoidable (due to ONNX
# spec requirements, etc). The TensorType object has accessors for these properties
# that return the property if it is statically known and return nullopt otherwise.
#
# In general, we should prefer to rely on the least specific information possible.
# For example, not relying on tensor properties at all is better than relying
# on the number of dimensions which is better than relying on
# concrete shapes. Doing so will make the export symbolics
# more robust to different graphs.
#
# ----------------------------------------------------------------------------------
# Extra context for symbolic functions
# ----------------------------------------------------------------------------------
#
# In general, symbolic functions only require inputs and attributes to
# the original node. In rare circumstances, extra context may be required.
# For example, symbolic function for `prim::Loop` needs access to the subblock of
# the original node.
# A symbolic function that has a first arg (before the Graph object) with the
# type annotation of torch.onnx.SymbolicContext will be called with that additional context.
# During export, it is populated from `utils._run_symbolic_function`
# to contain the context for each node being converted.
__all__ = [
"args_have_same_dtype",
"cast_pytorch_to_onnx",
"check_training_mode",
"dequantize_helper",
"is_caffe2_aten_fallback",
"parse_args",
"pytorch_name_to_type",
"quantize_helper",
"quantized_args",
"requantize_bias_helper",
"scalar_name_to_pytorch",
"scalar_type_to_onnx",
"scalar_type_to_pytorch_type",
]
# ---------------------------------------------------------------------------------
# Helper functions
# ---------------------------------------------------------------------------------
_ValueDescriptor = Literal[
"v",
"i",
"is",
"f",
"fs",
"b",
"s",
"t",
"none",
]
def _parse_arg(
value,
desc: _ValueDescriptor,
arg_name: Optional[str] = None,
node_name: Optional[str] = None,
):
if desc == "none":
return value
if desc == "v" or not _is_value(value):
return value
node = value.node()
if node.mustBeNone():
return None
if node.kind() == "onnx::Constant":
node_val = _node_get(node, "value")
if desc == "i":
return int(node_val)
elif desc == "f":
return float(node_val)
elif desc == "b":
return bool(node_val)
elif desc == "s":
return str(node_val)
elif desc == "t":
return node_val
elif desc == "is":
return [int(v) for v in node_val]
elif desc == "fs":
return [float(v) for v in node_val]
else:
raise errors.SymbolicValueError(
f"ONNX symbolic does not understand the Constant node '{node}' "
f"specified with descriptor '{desc}'.",
value,
)
elif node.kind() == "prim::ListConstruct":
if desc == "is":
for v in node.inputs():
element_node = v.node()
if element_node.kind() != "onnx::Constant":
raise errors.SymbolicValueError(
f"Failed to export a node '{element_node}' "
f"(in list node {node}) "
f"because it is not constant. "
f"Please try to make things (e.g. kernel sizes) static if possible.",
value,
)
return [int(_node_get(v.node(), "value")) for v in value.node().inputs()]
else:
raise errors.SymbolicValueError(
f"ONNX symbolic does not know how to unpack the ListConstruct node that "
f"is not a list of integers: '{node}'",
value,
)
if arg_name is None or node_name is None:
raise errors.SymbolicValueError(
f"Expected node type 'onnx::Constant', got '{node.kind()}'.",
value,
)
raise errors.SymbolicValueError(
"Expected node type 'onnx::Constant' "
f"for argument '{arg_name}' of node '{node_name}', got '{node.kind()}'.",
value,
)
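# Example of the conversion performed above (a sketch): for a graph Value produced by an
# onnx::Constant node holding torch.tensor([1, 2, 3]), _parse_arg(value, "is") returns
# [1, 2, 3], while descriptor "v" returns the Value itself untouched.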
def _node_get(node: _C.Node, key: str):
"""Gets attributes of a node which is polymorphic over return type."""
assert isinstance(node, _C.Node)
sel = node.kindOf(key)
return getattr(node, sel)(key)
def _is_onnx_constant(value: _C.Value):
"""Whether a Value is an ONNX constant."""
return value.node().kind() == "onnx::Constant"
def _maybe_get_const(value: _C.Value, descriptor: _ValueDescriptor):
# NOTE: prim::Constant at this stage usually means something not compatible in ONNX,
# otherwise it'd be converted to onnx::Constant
if _is_value(value) and _is_onnx_constant(value):
return _parse_arg(value, descriptor)
return value
def _maybe_get_scalar(value):
value_t = _maybe_get_const(value, "t")
if isinstance(value_t, torch.Tensor) and value_t.shape == ():
return value_t
return value
def _get_const(value, desc, arg_name):
if not _is_constant(value):
raise errors.SymbolicValueError(
f"ONNX symbolic expected a constant value of the '{arg_name}' argument, "
f"got '{value}'",
value,
)
return _parse_arg(value, desc)
def _unpack_list(list_value: _C.Value) -> List[_C.Value]:
list_node = list_value.node()
if list_node.kind() != "prim::ListConstruct":
raise errors.SymbolicValueError(
f"ONNX symbolic expected node type prim::ListConstruct, "
f"got '{list_node}'.",
list_value,
)
return list(list_node.inputs())
def _unpack_tuple(tuple_value: _C.Value) -> Tuple[_C.Value, ...]:
tuple_node = tuple_value.node()
if tuple_node.kind() != "prim::TupleConstruct":
raise errors.SymbolicValueError(
f"ONNX symbolic expected node type 'prim::TupleConstruct', "
f"got '{tuple_node.kind()}'.",
tuple_value,
)
return tuple(tuple_node.inputs())
# Check if list_value is output from prim::ListConstruct
# This is usually called before _unpack_list to ensure the list can be unpacked.
def _is_packed_list(list_value: _C.Value) -> bool:
return _is_value(list_value) and list_value.node().kind() == "prim::ListConstruct"
def parse_args(*arg_descriptors: _ValueDescriptor):
"""A decorator which converts args from torch._C.Value to built-in types.
For example:
```
@parse_args('v', 'i', 'fs')
foo(g, a, b, c):
assert isinstance(a, torch._C.Value)
assert isinstance(b, int)
assert isinstance(c, list)
assert isinstance(c[0], float)
```
Args:
arg_descriptors: list of str, where each element is
a string that specifies the type to convert to. Valid descriptors:
"v": no conversion, keep torch._C.Value.
"i": int
"is": list of int
"f": float
"fs": list of float
"b": bool
"s": str
"t": torch.Tensor
"""
def decorator(fn):
fn._arg_descriptors = arg_descriptors
@functools.wraps(fn)
def wrapper(g, *args, **kwargs):
# some args may be optional, so the length may be smaller
FILE_BUG_MSG = (
"If you believe this is not due to custom symbolic implementation within your code or "
"an external library, please file an issue at "
"https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml to report this bug."
)
assert len(arg_descriptors) >= len(args), (
f"A mismatch between the number of arguments ({len(args)}) and "
f"their descriptors ({len(arg_descriptors)}) was found at symbolic function '{fn.__name__}'. "
f"{FILE_BUG_MSG}"
)
try:
sig = inspect.signature(fn)
arg_names = list(sig.parameters.keys())[1:]
fn_name = fn.__name__
except Exception:
# FIXME(justinchuby): Avoid catching Exception.
# Catch a more specific exception instead.
arg_names = [None] * len(args) # type: ignore[list-item]
fn_name = None
args = [
_parse_arg(arg, arg_desc, arg_name, fn_name) # type: ignore[assignment]
for arg, arg_desc, arg_name in zip(args, arg_descriptors, arg_names)
]
# only support _outputs in kwargs
assert len(kwargs) <= 1, (
f"Symbolic function {fn.__name__}'s '**kwargs' can contain a single "
f"key/value entry. "
f"{FILE_BUG_MSG}"
)
if len(kwargs) == 1:
assert "_outputs" in kwargs, (
f"Symbolic function {fn.__name__}'s '**kwargs' can only contain "
f"'_outputs' key at '**kwargs'. "
f"{FILE_BUG_MSG}"
)
return fn(g, *args, **kwargs)
return wrapper
return decorator
def quantized_args(
*arg_q_descriptors: bool,
scale: Optional[float] = None,
zero_point: Optional[int] = None,
):
"""A decorator which extends support for quantized version of the base operator.
Quantization is detected by examining the arguments that are annotated by
`arg_q_descriptors`.
If quantization is detected, the base operator symbolic function will be wrapped with
argument de-quantization and output quantization.
Otherwise, only the base symbolic function will be invoked.
For example:
```
@quantized_args(True, False)
def foo(g, x, y):
return x + y
```
is equivalent to
```
def q_foo(g, x, y):
if is_quantized_tensor(x):
x = dequantize(x)
out = foo(g, x, y)
return quantize(out)
else:
return foo(g, x, y)
```
Args:
arg_q_descriptors: A sequence of bool, where each element represents if the
argument is QTensor for quantized version of this operator. It defaults
to False for unspecified (variable length) arguments.
scale: Quantized output scale. If None, derive from
the first quantized input scale.
zero_point: Quantized output zero point. If None,
derive from the first quantized input zero point.
"""
def decorator(fn):
fn._scale = scale
fn._zero_point = zero_point
@functools.wraps(fn)
def wrapper(g, *args, **kwargs):
_scale = fn._scale
if _scale is not None:
_scale = g.op("Constant", value_t=torch.tensor(_scale))
_zero_point = fn._zero_point
if _zero_point is not None:
_zero_point = g.op("Constant", value_t=torch.tensor(_zero_point))
# Support variable length arguments by marking unspecified ones as non-quantized
arg_q_descriptors_extended = arg_q_descriptors + (False,) * (
len(args) - len(arg_q_descriptors)
)
descriptor_args = tuple(zip(arg_q_descriptors_extended, args))
            # Run the regular symbolic function if none of the arguments is a QTensor.
if not any(
(descriptor and arg.node().kind() == "prim::TupleConstruct")
for descriptor, arg in descriptor_args
):
return fn(g, *args, **kwargs)
dequantized_args = []
for descriptor, arg in descriptor_args:
if descriptor:
dequantized_arg, scale, zero_point, _ = dequantize_helper(g, arg)
dequantized_args.append(dequantized_arg)
if _scale is None:
_scale = scale
if _zero_point is None:
_zero_point = zero_point
else:
dequantized_args.append(arg)
# TODO(justinchuby): Only single output is supported for now. We may want to
# support multiple outputs in the future.
output = fn(g, *dequantized_args, **kwargs)
return quantize_helper(g, output, _scale, _zero_point)
return wrapper
return decorator
def _scalar(x: torch.Tensor):
"""Convert a scalar tensor into a Python value."""
if isinstance(x, torch.Tensor) and x.shape == ():
return x.item()
return None
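# Rough illustration of the rule above: only a true 0-dim tensor is unwrapped,
# e.g. _scalar(torch.tensor(3.5)) returns the Python float 3.5, while
# _scalar(torch.tensor([3.5])) (shape (1,), not ()) returns None.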
def _if_scalar_type_as(g: _C.Graph, self, tensor):
"""
Convert self into the same type of tensor, as necessary.
We only support implicit casting for scalars, so we never
actually need to insert an ONNX cast operator here; just
fix up the scalar.
"""
if isinstance(self, _C.Value):
return self
scalar_type = tensor.type().scalarType()
if scalar_type:
ty = scalar_type.lower()
return getattr(self, ty)()
return self
def _is_none(x: _C.Value) -> bool:
return x.node().mustBeNone()
def _is_value(x: Any) -> bool:
return isinstance(x, _C.Value)
def _is_constant(value: Any) -> bool:
return not _is_value(value) or value.node().kind() in {
"onnx::Constant",
"prim::Constant",
}
def _is_tensor(x: _C.Value) -> bool:
return x.type().isSubtypeOf(_C.TensorType.get())
def _as_list_type(jit_type: _C.JitType) -> Optional[_C.ListType]:
if isinstance(jit_type, _C.ListType):
return jit_type
return None
def _is_list(x: _C.Value) -> bool:
return _as_list_type(x.type()) is not None
def _is_tensor_list(x: _C.Value) -> bool:
x_type = _as_list_type(x.type())
if x_type is None:
return False
return isinstance(x_type.getElementType(), _C.TensorType)
def _is_scalar_list(x: _C.Value) -> bool:
"""Checks if x is a scalar list, for example: List[float], List[int].
Besides checking the type is ListType, we also check if the data type is
a valid ONNX data type.
"""
x_type = _as_list_type(x.type())
if x_type is None:
return False
element_type = str(x_type.getElementType())
return (
_type_utils.valid_torch_name(element_type)
and _type_utils.JitScalarType.from_name(element_type).onnx_compatible()
)
def is_caffe2_aten_fallback() -> bool:
return (
GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
and _C_onnx._CAFFE2_ATEN_FALLBACK
)
def _get_tensor_rank(x: _C.Value) -> Optional[int]:
if not _is_tensor(x) or x.type() is None:
return None
x_type = x.type()
x_type = typing.cast(_C.TensorType, x_type)
return x_type.dim()
def _get_tensor_sizes(x: _C.Value, allow_nonstatic: bool = True):
if not _is_tensor(x) or x.type() is None:
return None
x_type = x.type()
x_type = typing.cast(_C.TensorType, x_type)
if allow_nonstatic:
# Each individual symbol is returned as None.
# e.g. [1, "a", "b"] -> [1, None, None]
return x_type.varyingSizes()
# returns None, if exists any symbol in sizes.
# e.g. [1, "a", "b"] -> None
return x_type.sizes()
def _get_tensor_dim_size(x: _C.Value, dim: int) -> Optional[int]:
sizes = _get_tensor_sizes(x)
return sizes[dim] if sizes else None
def _get_dim_for_cross(x: _C.Value, dim: Optional[int]):
if dim == -1:
tensor_rank = _get_tensor_rank(x)
assert tensor_rank is not None
return dim + tensor_rank
# If dim is not given, it defaults to the first dimension found with the size 3
if dim is None:
sizes = _get_tensor_sizes(x)
assert sizes is not None
for index, size in enumerate(sizes):
if size is not None and size == 3:
return index
return dim
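# Worked example of the dim resolution above: for an input of rank 3 and dim=-1
# this returns 2; for dim=None and static sizes such as [2, 3, 4] it returns 1,
# the first dimension of size 3, matching the torch.cross documentation.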
def _unimplemented(op: str, msg: str):
# For BC reasons, the behavior for Caffe2 does not raise exception for unimplemented operators
if _C_onnx._CAFFE2_ATEN_FALLBACK:
warnings.warn(
"ONNX export failed on " + op + " because " + msg + " not supported"
)
elif GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX:
_onnx_unsupported(f"{op}, {msg}")
def _onnx_unsupported(op_name: str):
raise RuntimeError(
f"Unsupported: ONNX export of operator {op_name}. "
"Please feel free to request support or submit a pull request on PyTorch GitHub."
)
def _onnx_opset_unsupported(op_name: str, current_opset: int, supported_opset: int):
raise RuntimeError(
f"Unsupported: ONNX export of {op_name} in opset {current_opset}. "
f"Please try opset version {supported_opset}."
)
def _onnx_opset_unsupported_detailed(
op_name: str, current_opset: int, supported_opset: int, reason: str
):
raise RuntimeError(
f"Unsupported: ONNX export of {op_name} in "
f"opset {current_opset}. {reason}. Please try opset version {supported_opset}."
)
def _block_list_in_opset(name: str):
def symbolic_fn(*args, **kwargs):
raise RuntimeError(
f"ONNX export failed on {name}, which is not implemented for opset "
f"{GLOBALS.export_onnx_opset_version}. "
"Try exporting with other opset versions."
)
return symbolic_fn
def _try_get_scalar_type(*args) -> Optional[str]:
for arg in args:
try:
return arg.type().scalarType()
except RuntimeError:
pass
return None
def _select_helper(g, self, dim, index, apply_reshape=True):
index_const = _maybe_get_scalar(index)
index_dim = _get_tensor_rank(index)
if not _is_value(index_const):
# Index is a constant scalar. Make it a size 1 constant tensor.
index = g.op("Constant", value_t=torch.LongTensor([index_const]))
elif index_dim is not None and apply_reshape:
if index_dim == 0:
# Index is a scalar. Reshape it to a size 1 tensor.
index = _reshape_helper(
g, index, g.op("Constant", value_t=torch.LongTensor([1]))
)
index_scalar_type = index.type().scalarType()
if index_scalar_type is None or index_scalar_type not in {"Long", "Int"}:
index = g.op("Cast", index, to_i=_C_onnx.TensorProtoDataType.INT64)
return g.op("Gather", self, index, axis_i=dim)
def _slice_helper(g, input, axes, starts, ends, steps=None, dynamic_slice=False):
if GLOBALS.export_onnx_opset_version <= 9:
from torch.onnx.symbolic_opset9 import _slice as _slice9
return _slice9(g, input, axes, starts, ends)
else:
from torch.onnx.symbolic_opset10 import _slice as _slice10
return _slice10(g, input, axes, starts, ends, steps, dynamic_slice)
def _is_in_type_group(value, scalar_types: Set[_type_utils.JitScalarType]) -> bool:
"""Helper function for determining if a value is in a scalar type group."""
if value is None:
return False
if isinstance(value, torch.Tensor):
return _type_utils.JitScalarType.from_dtype(value.dtype) in scalar_types
elif isinstance(value.type(), torch.ListType):
return (
_type_utils.JitScalarType.from_dtype(value.type().getElementType().dtype())
in scalar_types
)
scalar_type = value.type().scalarType()
if scalar_type is None:
warnings.warn(
"Type cannot be inferred, which might cause exported graph to produce incorrect results."
)
return False
try:
return _type_utils.JitScalarType.from_name(scalar_type) in scalar_types
except ValueError:
# scalar_type is not a known ScalarType
return False
def _is_fp(value) -> bool:
return _is_in_type_group(
value,
{
_type_utils.JitScalarType.FLOAT,
_type_utils.JitScalarType.DOUBLE,
_type_utils.JitScalarType.HALF,
_type_utils.JitScalarType.BFLOAT16,
},
)
def _is_bool(value) -> bool:
return _is_in_type_group(value, {_type_utils.JitScalarType.BOOL})
def _generate_wrapped_number(g, scalar):
"""Creates a wrapped number based on https://github.com/pytorch/pytorch/issues/9515.
    A Tensor is considered a "wrapped number" if it is
    auto-wrapped from a C++ or Python number type. Integer types are
    wrapped as 0-dim int64 tensors and floating-point types are
    wrapped as 0-dim double tensors.
    The input to this function is a constant value. If the data type
    is a floating point type, it is converted to a 0-dim double
    tensor, else it is converted to a 0-dim tensor of its original type.
"""
assert not isinstance(scalar, torch.Tensor)
if isinstance(scalar, float):
return g.op("Constant", value_t=torch.tensor(scalar, dtype=torch.double))
return g.op("Constant", value_t=torch.tensor(scalar))
def _sort_helper(g, input, dim, decending=True, out=None):
if out is not None:
_unimplemented("Sort", "Out parameter is not supported")
shape_ = g.op("Shape", input)
dim_size_ = g.op(
"Gather",
shape_,
g.op("Constant", value_t=torch.tensor([dim], dtype=torch.int64)),
)
if GLOBALS.export_onnx_opset_version <= 10:
if not decending:
_unimplemented("Sort", "Ascending is not supported")
return g.op("TopK", input, dim_size_, axis_i=dim, outputs=2)
else:
return g.op(
"TopK", input, dim_size_, axis_i=dim, largest_i=decending, outputs=2
)
def _topk_helper(g, input, k, dim, largest=True, sorted=False, out=None):
if out is not None:
_unimplemented("TopK", "Out parameter is not supported")
if not _is_value(k):
k = g.op("Constant", value_t=torch.tensor([k], dtype=torch.int64))
else:
k = _reshape_helper(g, k, g.op("Constant", value_t=torch.tensor([1])))
if _try_get_scalar_type(k) != "Long":
k = g.op("Cast", k, to_i=_C_onnx.TensorProtoDataType.INT64)
if GLOBALS.export_onnx_opset_version <= 10:
if not largest:
_unimplemented("TopK", "Ascending is not supported")
return g.op("TopK", input, k, axis_i=dim, outputs=2)
else:
return g.op(
"TopK", input, k, axis_i=dim, largest_i=largest, sorted_i=sorted, outputs=2
)
def _lt_helper(g, input, other):
if GLOBALS.export_onnx_opset_version <= 8:
from torch.onnx.symbolic_opset8 import lt as _lt8
return _lt8(g, input, other)
else:
from torch.onnx.symbolic_opset9 import lt as _lt9
return _lt9(g, input, other)
def _interpolate_warning(interpolate_mode):
onnx_op = (
"onnx:Resize" if GLOBALS.export_onnx_opset_version >= 10 else "onnx:Upsample"
)
warnings.warn(
"You are trying to export the model with "
+ onnx_op
+ " for ONNX opset version "
"" + str(GLOBALS.export_onnx_opset_version) + ". "
"This operator might cause results to not match the expected results by PyTorch.\n"
"ONNX's Upsample/Resize operator did not match Pytorch's Interpolation until opset 11. "
"Attributes to determine how to transform the input were added in onnx:Resize in opset 11 "
"to support Pytorch's behavior (like coordinate_transformation_mode and nearest_mode).\n"
"We recommend using opset 11 and above for models using this operator."
)
def _unsqueeze_helper(g, input, axes_i):
if _is_constant(axes_i[0]):
if GLOBALS.export_onnx_opset_version >= 13:
axes = g.op("Constant", value_t=torch.tensor(axes_i, dtype=torch.long))
return g.op("Unsqueeze", input, axes)
return g.op("Unsqueeze", input, axes_i=axes_i)
# Tensor type
if GLOBALS.export_onnx_opset_version < 13:
raise errors.SymbolicValueError(
"Opset version must be >= 13 for Unsqueeze with dynamic axes.", input
)
return g.op("Unsqueeze", input, axes_i[0])
def _squeeze_helper(g, input, axes_i):
if _is_constant(axes_i[0]):
if GLOBALS.export_onnx_opset_version >= 13:
axes = g.op("Constant", value_t=torch.tensor(axes_i, dtype=torch.long))
return g.op("Squeeze", input, axes)
return g.op("Squeeze", input, axes_i=axes_i)
# Tensor type
if GLOBALS.export_onnx_opset_version < 13:
raise errors.SymbolicValueError(
"Opset version must be >= 13 for Squeeze with dynamic axes.", input
)
axes_t = axes_i[0]
axes_rank = _get_tensor_rank(axes_t)
assert axes_rank is not None
if axes_rank > 1:
raise errors.SymbolicValueError(
"For Squeeze axses as input, the axes rank must be one in ONNX spec.", input
)
elif axes_rank == 0:
# The axes is a scalar. Unsqueeze it to a rank 1 tensor.
axes_t = _unsqueeze_helper(g, axes_t, [0])
return g.op("Squeeze", input, axes_t)
return g.op("Squeeze", input, axes_t)
def _reducesum_helper(g, input, axes_i=None, keepdims_i=1, noop_with_empty_axes_i=0):
keepdims_i = _maybe_get_const(keepdims_i, "i")
if GLOBALS.export_onnx_opset_version >= 13:
if axes_i:
if not _is_value(axes_i):
axes_i = g.op(
"Constant", value_t=torch.tensor(axes_i, dtype=torch.long)
)
return g.op(
"ReduceSum",
input,
axes_i,
keepdims_i=keepdims_i,
noop_with_empty_axes_i=noop_with_empty_axes_i,
)
return g.op(
"ReduceSum",
input,
keepdims_i=keepdims_i,
noop_with_empty_axes_i=noop_with_empty_axes_i,
)
else:
return g.op("ReduceSum", input, axes_i=axes_i, keepdims_i=keepdims_i)
def _interpolate_size_to_scales(g, input, output_size, dim):
output_size = _maybe_get_const(output_size, "is")
if _is_value(output_size):
offset = 2
offsets = g.op("Constant", value_t=torch.ones(offset, dtype=torch.float32))
dividend = g.op("Cast", output_size, to_i=_C_onnx.TensorProtoDataType.FLOAT)
divisor = _slice_helper(
g, g.op("Shape", input), axes=[0], ends=[sys.maxsize], starts=[offset]
)
divisor = g.op("Cast", divisor, to_i=_C_onnx.TensorProtoDataType.FLOAT)
scale_dims = g.op("Div", dividend, divisor)
scales = g.op("Concat", offsets, scale_dims, axis_i=0)
else:
scales_constant = [
1.0
if i < 2
else float(output_size[-(dim - i)])
/ float(input.type().sizes()[-(dim - i)])
for i in range(0, dim)
]
scales = g.op(
"Constant", value_t=torch.tensor(scales_constant, dtype=torch.float32)
)
return scales
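# Worked example of the static branch above: for an NCHW input of shape
# (1, 3, 8, 8), output_size (16, 16) and dim=4, scales_constant comes out as
# [1.0, 1.0, 2.0, 2.0] - ones for the batch/channel dims and output/input
# ratios for the spatial dims.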
def _interpolate_get_scales_if_available(g, scales):
available_scales = _maybe_get_const(scales[0], "fs") != -1 and not _is_none(
scales[0]
)
if not available_scales:
return None
offsets = g.op("Constant", value_t=torch.ones(2, dtype=torch.float32))
scales_list = g.op(
"Constant", value_t=torch.tensor(_maybe_get_const(scales[0], "fs"))
)
scales = g.op("Concat", offsets, scales_list, axis_i=0)
return scales
def _get_interpolate_attributes(g, mode, args):
if mode == "nearest":
align_corners = None
scales = args[0:]
else:
align_corners = args[0]
scales = args[1:]
scales = _interpolate_get_scales_if_available(g, scales)
return scales, align_corners
def _interpolate_get_scales(g, scale_factor, dim):
offsets = g.op("Constant", value_t=torch.ones(2, dtype=torch.float32))
scale_factor_rank = _get_tensor_rank(scale_factor)
if isinstance(scale_factor.type(), _C.ListType) or (
scale_factor_rank is not None and scale_factor_rank > 0
):
return g.op("Concat", offsets, scale_factor, axis_i=0)
else:
scale_factor = _unsqueeze_helper(g, scale_factor, [0])
scale_factor = g.op(
"Cast", scale_factor, to_i=_C_onnx.TensorProtoDataType.FLOAT
)
scales = [scale_factor for i in range(dim - 2)]
scale_factor = g.op("Concat", offsets, *scales, axis_i=0)
return scale_factor
def _interpolate_get_scales_and_mode(g, input, size, scale_factor, mode, align_corners):
mode = _maybe_get_const(mode, "s")
if "linear" in mode:
mode = "linear"
if "cubic" in mode:
mode = "cubic"
_interpolate_warning(mode)
align_corners = _maybe_get_const(align_corners, "b")
if isinstance(align_corners, bool) and align_corners:
return _unimplemented("interpolate", "align_corners == True")
if not input.type().dim():
return _unimplemented("interpolate", "missing input shape")
dim = input.type().dim()
if not _is_none(scale_factor):
scale_factor = _interpolate_get_scales(g, scale_factor, dim)
elif not _is_none(size):
if not _is_packed_list(size):
is_scalar = _maybe_get_const(size, "t").dim() == 0
if is_scalar:
size = _unsqueeze_helper(g, size, [0])
size = [size for i in range(dim - 2)]
size = g.op("Concat", *size, axis_i=0)
scale_factor = _interpolate_size_to_scales(g, input, size, dim)
else:
return _unimplemented(
"interpolate", "Both size and scales are None in __interpolate"
)
return scale_factor, mode
def _argmin_argmax_helper(
g, input: torch._C.Value, dim: torch._C.Value, keepdim: int, op_name: str
):
def op_wrapper(input, axis_i, keepdims_i):
if GLOBALS.export_onnx_opset_version >= 12:
return g.op(
op_name,
input,
axis_i=axis_i,
keepdims_i=keepdims_i,
select_last_index_i=False,
)
return g.op(op_name, input, axis_i=axis_i, keepdims_i=keepdims_i)
if _is_none(dim):
flattened = _reshape_helper(
g, input, g.op("Constant", value_t=torch.tensor([-1]))
)
output = op_wrapper(flattened, axis_i=0, keepdims_i=False)
if keepdim:
input_shape = g.op("Shape", input)
input_shape_shape = g.op("Shape", input_shape)
new_shape = g.op(
"ConstantOfShape",
input_shape_shape,
value_t=torch.tensor([1], dtype=torch.int64),
)
output = g.op("Reshape", output, new_shape)
return output
dim = _parse_arg(dim, "i")
return op_wrapper(input, axis_i=dim, keepdims_i=keepdim)
def _interpolate_helper(name, dim, interpolate_mode):
@quantized_args(True, False, False)
def symbolic_fn(g, input, output_size, *args):
scales, align_corners = _get_interpolate_attributes(g, interpolate_mode, args)
align_corners = _maybe_get_scalar(align_corners)
coordinate_transformation_mode = (
"asymmetric"
if interpolate_mode == "nearest"
else "align_corners"
if align_corners
else "half_pixel"
)
if scales is None:
input_size = g.op("Shape", input)
input_size_beg = _slice_helper(
g, input_size, axes=[0], ends=[2], starts=[0]
)
output_size = g.op(
"Cast", output_size, to_i=_C_onnx.TensorProtoDataType.INT64
)
output_size = g.op("Concat", input_size_beg, output_size, axis_i=0)
if GLOBALS.export_onnx_opset_version >= 13:
empty_roi = _optional_input_placeholder_tensor(g)
empty_scales = _optional_input_placeholder_tensor(g)
else:
empty_roi = g.op(
"Constant", value_t=torch.tensor([], dtype=torch.float32)
)
empty_scales = g.op(
"Constant", value_t=torch.tensor([], dtype=torch.float32)
)
return g.op(
"Resize",
input,
empty_roi,
empty_scales,
output_size,
coordinate_transformation_mode_s=coordinate_transformation_mode,
cubic_coeff_a_f=-0.75, # only valid when mode="cubic"
mode_s=interpolate_mode, # nearest, linear, or cubic
nearest_mode_s="floor",
) # only valid when mode="nearest"
else:
if GLOBALS.export_onnx_opset_version >= 13:
empty_roi = _optional_input_placeholder_tensor(g)
else:
empty_roi = g.op(
"Constant", value_t=torch.tensor([], dtype=torch.float32)
)
return g.op(
"Resize",
input,
empty_roi,
scales,
coordinate_transformation_mode_s=coordinate_transformation_mode,
cubic_coeff_a_f=-0.75, # only valid when mode="cubic"
mode_s=interpolate_mode, # nearest, linear, or cubic
nearest_mode_s="floor",
) # only valid when mode="nearest"
return symbolic_fn
def __interpolate_helper(
g, input, size, scale_factor, mode, align_corners, recompute_scale_factor
):
mode = _maybe_get_const(mode, "s")
if "linear" in mode:
mode = "linear"
if "cubic" in mode:
mode = "cubic"
align_corners = _maybe_get_const(align_corners, "b")
align_corners = False if not isinstance(align_corners, bool) else align_corners
coordinate_transformation_mode = (
"asymmetric"
if mode == "nearest"
else "align_corners"
if align_corners
else "half_pixel"
)
if not _is_none(size):
input_size = g.op("Shape", input)
input_size = _slice_helper(g, input_size, axes=[0], ends=[2], starts=[0])
# in some cases size is not a packed list but size is a scalar
# We need to also verify that (_maybe_get_const(size, "t").dim() == 0)
# but this information is not always available. Try to get the dim,
# and if not assume that it is not a scalar.
try:
is_scalar = not _is_packed_list(size) and (
_maybe_get_const(size, "t").dim() == 0
)
except AttributeError:
is_scalar = not _is_packed_list(size)
if not is_scalar:
warnings.warn(
"Cannot verify if the output_size is a scalar "
"while exporting interpolate. Assuming that it is not a scalar."
)
if is_scalar:
rank = _get_tensor_rank(input)
if rank is None:
return _unimplemented(
"interpolate (with a scalar output_size)",
"missing input shape (try giving an array of output_size values)",
)
size = _unsqueeze_helper(g, size, [0])
size = [size for i in range(rank - 2)]
size = g.op("Concat", *size, axis_i=0)
size = g.op("Cast", size, to_i=_C_onnx.TensorProtoDataType.INT64)
size = g.op("Concat", input_size, size, axis_i=0)
if GLOBALS.export_onnx_opset_version >= 13:
empty_roi = _optional_input_placeholder_tensor(g)
empty_scales = _optional_input_placeholder_tensor(g)
else:
empty_roi = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32))
empty_scales = g.op(
"Constant", value_t=torch.tensor([], dtype=torch.float32)
)
return g.op(
"Resize",
input,
empty_roi,
empty_scales,
size,
coordinate_transformation_mode_s=coordinate_transformation_mode,
cubic_coeff_a_f=-0.75, # only valid when mode="cubic"
mode_s=mode, # nearest, linear, or cubic
nearest_mode_s="floor",
)
else: # if not _is_none(scales)
rank = _get_tensor_rank(input)
if rank is None:
return _unimplemented("interpolate (with scales)", "missing input shape")
if GLOBALS.export_onnx_opset_version >= 13:
empty_roi = _optional_input_placeholder_tensor(g)
else:
empty_roi = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32))
scales = _interpolate_get_scales(g, scale_factor, rank)
return g.op(
"Resize",
input,
empty_roi,
scales,
coordinate_transformation_mode_s=coordinate_transformation_mode,
cubic_coeff_a_f=-0.75, # only valid when mode="cubic"
mode_s=mode, # nearest, linear, or cubic
nearest_mode_s="floor",
) # only valid when mode="nearest"
def _unbind_helper(g, self, dim, _outputs):
if GLOBALS.export_onnx_opset_version < 11:
from torch.onnx.symbolic_opset9 import unbind
elif GLOBALS.export_onnx_opset_version <= 12:
from torch.onnx.symbolic_opset11 import unbind # type: ignore[no-redef]
else:
from torch.onnx.symbolic_opset13 import unbind # type: ignore[no-redef]
return unbind(g, self, dim, _outputs)
def _scatter_helper(g, self, dim, index, src):
if GLOBALS.export_onnx_opset_version <= 10:
from torch.onnx.symbolic_opset9 import scatter
else:
# for mypy, scatter was imported two lines above
from torch.onnx.symbolic_opset11 import scatter # type: ignore[no-redef]
return scatter(g, self, dim, index, src)
def _repeat_interleave_split_helper(g, self, reps, dim):
if GLOBALS.export_onnx_opset_version <= 12:
split_out = g.op("Split", self, split_i=[1] * reps, axis_i=dim, outputs=reps)
else:
from torch.onnx.symbolic_opset13 import split
repeats = g.op("Constant", value_t=torch.tensor([1] * reps))
split_out = split(g, self, repeats, dim, _outputs=reps)
return split_out if reps > 1 else [split_out]
def _arange_cast_helper(
g, end, start=None, step=None, dtype=None
) -> Tuple[
_type_utils.JitScalarType,
Optional[_C.Value],
Optional[_C.Value],
Optional[_C.Value],
]:
def _is_all_integral(scalars):
for scalar in scalars:
try:
if scalar.type().scalarType() != "Long":
return False
except Exception:
# FIXME(justinchuby): Avoid catching Exception.
# Catch a more specific exception instead.
pass
return True
# This logic is based on torch.arange docs. If "dtype" is provided,
# infer input types from dtype. If not, then check if any of start, stop,
    # or step are floating point, and if so infer the type from torch.get_default_dtype().
# Otherwise, the dtype is inferred to be torch.int64.
if dtype is None or (_is_value(dtype) and _is_none(dtype)):
if _is_all_integral([start, end, step]):
scalar_type = _type_utils.JitScalarType.INT64
else:
scalar_type = _type_utils.JitScalarType.from_dtype(
torch.get_default_dtype()
)
else:
assert isinstance(dtype, int)
# TODO(justinchuby): Check if dtype is indeed a int.
scalar_type = _type_utils.JitScalarType(dtype)
start = g.op("Cast", start, to_i=scalar_type.onnx_type()) if start else None
end = g.op("Cast", end, to_i=scalar_type.onnx_type()) if end else None
step = g.op("Cast", step, to_i=scalar_type.onnx_type()) if step else None
return scalar_type, end, start, step
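# The inference above mirrors torch.arange's dtype rules; roughly, assuming the
# default dtype is torch.float32:
#   >>> torch.arange(5).dtype
#   torch.int64
#   >>> torch.arange(0.0, 1.0, 0.25).dtype
#   torch.float32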
def _arange_helper(g, *args):
if GLOBALS.export_onnx_opset_version <= 10:
from torch.onnx.symbolic_opset9 import arange
else:
from torch.onnx.symbolic_opset11 import arange # type: ignore[no-redef]
return arange(g, *args)
def _size_helper(g, self, dim):
full_shape = g.op("Shape", self)
from torch.onnx.symbolic_opset9 import select
return select(g, full_shape, g.op("Constant", value_t=torch.tensor([0])), dim)
def _index_fill_reshape_helper(g, self, dim, index):
# 1. reshape index => [1, ..., 1, dim, 1, ..., 1]
# 2. expand index => [..., dim, ...], same shape as self except for dim.
# 3. expand value as well.
# 4. apply onnx::scatter.
from torch.onnx.symbolic_opset9 import expand
if GLOBALS.export_onnx_opset_version <= 10:
from torch.onnx.symbolic_opset9 import scatter
else:
# for mypy, scatter was imported two lines above
from torch.onnx.symbolic_opset11 import scatter # type: ignore[no-redef]
if self.type().dim() is None:
return _unimplemented("index_fill", "input rank not accesible")
self_dim = self.type().dim()
dim_value = _parse_arg(dim, "i")
unsqueezed_index = _unsqueeze_helper(
g, index, [i for i in range(self_dim) if i != dim_value]
)
expanded_index_shape = scatter(
g, g.op("Shape", self), 0, _unsqueeze_helper(g, dim, [0]), g.op("Shape", index)
)
expanded_index = expand(g, unsqueezed_index, expanded_index_shape, None)
return expanded_index_shape, expanded_index
# By default, when any value in the 'shape' input is equal to zero
# the corresponding dimension value is copied from the input tensor dynamically.
# allowzero=1 indicates that if any value in the 'shape' input is set to zero,
# the zero value is honored, similar to NumPy.
# allowzero=1 is only supported for opset version >= 14.
def _reshape_helper(g, input, shape, allowzero=0):
shape = _maybe_get_const(shape, "is")
if not _is_value(shape):
shape = g.op("Constant", value_t=torch.LongTensor(shape))
if GLOBALS.export_onnx_opset_version <= 13:
if allowzero == 1:
raise _onnx_opset_unsupported(
"Reshape with allowzero=1", GLOBALS.export_onnx_opset_version, 14
)
return g.op("Reshape", input, shape)
else:
return g.op("Reshape", input, shape, allowzero_i=allowzero)
def _batchnorm_helper(g, input, weight, bias, running_mean, running_var):
from torch.onnx.symbolic_opset9 import _var_mean
batch_size = _get_tensor_dim_size(input, 0)
channel_size = _get_tensor_dim_size(input, 1)
if weight is None or _is_none(weight):
if channel_size is None:
raise errors.SymbolicValueError(
"Unsupported: ONNX export of batch_norm for unknown channel size.",
input,
)
weight_value = torch.tensor(
[1.0] * channel_size,
dtype=_type_utils.JitScalarType.from_name(
input.type().scalarType()
).dtype(),
)
weight = g.op("Constant", value_t=weight_value)
if bias is None or _is_none(bias):
if channel_size is None:
raise errors.SymbolicValueError(
"Unsupported: ONNX export of batch_norm for unknown channel size.",
input,
)
bias_value = torch.tensor(
[0.0] * channel_size,
dtype=_type_utils.JitScalarType.from_name(
input.type().scalarType()
).dtype(),
)
bias = g.op("Constant", value_t=bias_value)
# If track_running_stats is set to False batch statistics are instead used during evaluation time
if (
running_mean is None
or _is_none(running_mean)
or running_var is None
or _is_none(running_var)
):
assert batch_size is not None and channel_size is not None
reshape_in = _reshape_helper(
g,
input,
g.op(
"Constant",
value_t=torch.tensor([batch_size, channel_size, -1], dtype=torch.int64),
),
)
trans_in = g.op("Transpose", reshape_in, perm_i=[0, 2, 1])
running_var, running_mean = _var_mean(
g,
trans_in,
g.op("Constant", value_t=torch.tensor([0, 1], dtype=torch.int64)),
False,
False,
)
return weight, bias, running_mean, running_var
def _avgpool_helper(
tuple_fn: Callable[[Any], Sequence[int]],
padding: Union[int, Sequence[int]],
kernel_size,
stride,
divisor_override,
name,
) -> Tuple[int, ...]:
if divisor_override and divisor_override.node().kind() != "prim::Constant":
_unimplemented(name, "divisor_override")
return tuple(tuple_fn(padding))
def check_training_mode(op_train_mode: int, op_name: str) -> None:
"""Warns the user if the model's training mode and the export mode do not agree."""
if GLOBALS.training_mode == _C_onnx.TrainingMode.PRESERVE:
return
if op_train_mode:
op_mode_enum = _C_onnx.TrainingMode.TRAINING
else:
op_mode_enum = _C_onnx.TrainingMode.EVAL
if op_mode_enum == GLOBALS.training_mode:
# The modes agree. Do nothing
return
op_mode_text = f"train={bool(op_train_mode)}"
# Setting the model mode could result in op_mode != GLOBALS.training_mode
# if the model is a FuncModule. In this case we warn the user of
# the state and export depending on op_mode
# This is to support use-cases of fixing certain layer weights
# in training.
warnings.warn(
f"ONNX export mode is set to {GLOBALS.training_mode}, but operator '{op_name}' "
f"is set to {op_mode_text}. Exporting with {op_mode_text}."
)
def _flatten_helper(g, input, start_dim, end_dim, dim):
input_size = g.op("Shape", input)
slice1 = _slice_helper(g, input_size, axes=[0], starts=[0], ends=[start_dim])
slices = [slice1, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long))]
if end_dim < dim - 1:
slice3 = _slice_helper(
g, input_size, axes=[0], starts=[end_dim + 1], ends=[dim]
)
slices = [
slice1,
g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
slice3,
]
final_shape = g.op("Concat", *slices, axis_i=0)
from torch.onnx.symbolic_opset9 import _reshape_from_tensor
return _reshape_from_tensor(g, input, final_shape)
def _is_split_static(split_size_or_sizes, _outputs):
if _outputs is None:
return False
if (
_is_value(split_size_or_sizes)
and split_size_or_sizes.node().kind() != "onnx::Constant"
):
return False
return True
def _optional_input_placeholder_tensor(g):
n = g.op("prim::Constant")
n.setType(_C.OptionalType.ofTensor())
return n
def _handle_reduce_dim_none(g, self, op_name):
rank = _get_tensor_rank(self)
if rank is not None and any(
[_get_tensor_dim_size(self, i) == 0 for i in range(rank)]
):
# If input tensor is empty, according to ONNX ReduceSum definition,
# set keepdims=1 so that the resulted tensor has the same rank as the input.
return g.op(op_name, self, keepdims_i=1)
return g.op(op_name, self, keepdims_i=0)
def dequantize_helper(
g,
qtensor: _C.Value,
qdtype: Optional[torch.onnx.TensorProtoDataType] = None,
) -> Tuple[_C.Value, _C.Value, _C.Value, Optional[_C.Value]]:
"""Appends to graph `g` ONNX nodes that dequantizes `qtensor` into `tensor`.
Args:
g: Graph, the ONNX IR graph that is under construction.
qtensor: torch._C.Value, either a tuple of (quantized_tensor, scale, zero_point) for per tensor quantization,
or (quantized_tensor, scale, zero_point, axis) for per channel quantization.
Representing the quantized tensor.
qdtype: torch.onnx.TensorProtoDataType default None, if not None, represents the data type of quantized tensor.
It must be either torch.onnx.TensorProtoDataType.UINT8 or torch.onnx.TensorProtoDataType.INT8.
"""
unpacked_qtensors = _unpack_tuple(qtensor)
tensor, scale, zero_point = unpacked_qtensors[:3]
axis = unpacked_qtensors[3] if len(unpacked_qtensors) >= 4 else None
axis_i = _get_const(axis, "i", "axis")
input_scalar_type = tensor.type().scalarType()
assert input_scalar_type is not None
input_qdtype = _type_utils.JitScalarType.from_name(tensor.type().scalarType())
if qdtype is None:
if input_qdtype is not None:
qdtype = input_qdtype.onnx_type()
else:
qdtype = _C_onnx.TensorProtoDataType.UINT8
value = g.op("Cast", tensor, to_i=qdtype)
scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)
zero_point = g.op("Cast", zero_point, to_i=qdtype)
if axis_i is not None and GLOBALS.export_onnx_opset_version < 13:
_onnx_opset_unsupported_detailed(
"DequantizeLinear",
GLOBALS.export_onnx_opset_version,
13,
"Attribute axis is not supported.",
)
return (
g.op("DequantizeLinear", value, scale, zero_point, axis_i=axis_i),
scale,
zero_point,
axis,
)
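# Rough sketch of the affine quantization math behind DequantizeLinear /
# QuantizeLinear, with made-up numbers: given scale=0.1 and zero_point=128, the
# float value 0.3 quantizes to round(0.3 / 0.1) + 128 = 131 and dequantizes
# back to (131 - 128) * 0.1 = 0.3.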
def quantize_helper(
g,
tensor: _C.Value,
scale: _C.Value,
zero_point: _C.Value,
axis: Optional[_C.Value] = None,
) -> _C.Value:
"""Appends to graph `g` ONNX nodes that quantizes `tensor` based on `scale`, `zero_point` and `axis`.
Args:
g: Graph, the ONNX IR graph that is under construction.
tensor: torch._C.Value, representing the tensor to be quantized.
scale: torch._C.Value, quantized scale.
zero_point: torch._C.Value, quantized zero point.
axis: Optional[torch._C.Value] default None, if None, represents per tensor quantization.
Otherwise, represents per channel quantization, along given axis.
"""
if (
axis is not None
and not _is_none(axis)
and GLOBALS.export_onnx_opset_version < 13
):
_onnx_opset_unsupported_detailed(
"QuantizeLinear",
GLOBALS.export_onnx_opset_version,
13,
"Attribute axis is not supported.",
)
assert scale is not None
if scale.type().scalarType() != "Float": # type: ignore[attr-defined]
# TODO(justinchuby): Remove type ignore after #81112 is checked in.
scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)
assert zero_point is not None
if zero_point.type().scalarType() not in ("Byte", "Char"): # type: ignore[attr-defined]
# TODO(justinchuby): Remove type ignore after #81112 is checked in.
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8)
output = g.op(
"QuantizeLinear",
tensor,
scale,
zero_point,
axis_i=_get_const(axis, "i", "axis"),
)
args = [output, scale, zero_point]
if axis is not None and not _is_none(axis):
args.append(axis)
return g.op("prim::TupleConstruct", *args)
def requantize_bias_helper(g, bias, input_scale, weight_scale, axis=None):
"""In PyTorch, bias is float and is quantized to int32 implicitly inside the quantized ATen op kernel.
In ONNX we need to make the quantization explicit because operators expect all of their inputs to be quantized.
Since int32 is not a supported output type by ONNX operator `QuantizeLinear`, quantization is exported using
regular operators.
"""
bias_scale = g.op("Mul", weight_scale, input_scale)
bias_scale_shape = g.op("Shape", bias_scale)
bias_zero_point = g.op(
"ConstantOfShape", bias_scale_shape, value_t=torch.tensor([0], dtype=torch.int)
)
q_bias = g.op(
"Cast", g.op("Div", bias, bias_scale), to_i=_C_onnx.TensorProtoDataType.INT32
)
axis_args = []
if axis is not None and not _is_none(axis):
axis_args.append(axis)
return g.op("prim::TupleConstruct", q_bias, bias_scale, bias_zero_point, *axis_args)
def args_have_same_dtype(args):
assert args
base_dtype = args[0].type().scalarType()
has_same_dtype = all(elem.type().scalarType() == base_dtype for elem in args)
return has_same_dtype
# TODO(justinchuby): Delete these setters, users should set the vars directly.
def _set_opset_version(opset_version: int):
GLOBALS.export_onnx_opset_version = opset_version
def _set_operator_export_type(operator_export_type):
GLOBALS.operator_export_type = operator_export_type
# This function is for debug use only.
# onnx_shape_inference = True by default.
def _set_onnx_shape_inference(onnx_shape_inference: bool):
GLOBALS.onnx_shape_inference = onnx_shape_inference
# Deprecated. Internally use _type_utils.ScalarType
# TODO: remove these once we support Type's in the JIT IR and we can once again
# use the unified toType operator
cast_pytorch_to_onnx = {
"Byte": _C_onnx.TensorProtoDataType.UINT8,
"Char": _C_onnx.TensorProtoDataType.INT8,
"Double": _C_onnx.TensorProtoDataType.DOUBLE,
"Float": _C_onnx.TensorProtoDataType.FLOAT,
"Half": _C_onnx.TensorProtoDataType.FLOAT16,
"Int": _C_onnx.TensorProtoDataType.INT32,
"Long": _C_onnx.TensorProtoDataType.INT64,
"Short": _C_onnx.TensorProtoDataType.INT16,
"Bool": _C_onnx.TensorProtoDataType.BOOL,
"ComplexFloat": _C_onnx.TensorProtoDataType.COMPLEX64,
"ComplexDouble": _C_onnx.TensorProtoDataType.COMPLEX128,
"BFloat16": _C_onnx.TensorProtoDataType.BFLOAT16,
"Undefined": _C_onnx.TensorProtoDataType.UNDEFINED,
}
# Deprecated. Internally use _type_utils.ScalarType
scalar_name_to_pytorch = {
"uint8_t": "Byte",
"int8_t": "Char",
"double": "Double",
"float": "Float",
"half": "Half",
"int": "Int",
"int64_t": "Long",
"int16_t": "Short",
"bool": "Bool",
"complex64": "ComplexFloat",
"complex128": "ComplexDouble",
"qint8": "QInt8",
"quint8": "QUInt8",
"qint32": "QInt32",
"bfloat16": "BFloat16",
}
# Deprecated. Internally use _type_utils.ScalarType
# This indicates each scalar type's corresponding
# torch type. Related source:
# https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h
scalar_type_to_pytorch_type = [
torch.uint8, # 0
torch.int8, # 1
torch.short, # 2
torch.int, # 3
torch.int64, # 4
torch.half, # 5
torch.float, # 6
torch.double, # 7
torch.complex32, # 8
torch.complex64, # 9
torch.complex128, # 10
torch.bool, # 11
torch.qint8, # 12
torch.quint8, # 13
torch.qint32, # 14
torch.bfloat16, # 15
]
# Deprecated. Internally use _type_utils.ScalarType
# source of truth is
# https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_dtypes.cpp
pytorch_name_to_type = {
"Byte": torch.uint8,
"Char": torch.int8,
"Double": torch.double,
"Float": torch.float,
"Half": torch.half,
"Int": torch.int,
"Long": torch.int64,
"Short": torch.short,
"Bool": torch.bool,
"ComplexFloat": torch.complex64,
"ComplexDouble": torch.complex128,
"QInt8": torch.qint8,
"QUInt8": torch.quint8,
"QInt32": torch.qint32,
"BFloat16": torch.bfloat16,
}
# Deprecated. Internally use _type_utils.ScalarType
scalar_type_to_onnx = [
cast_pytorch_to_onnx["Byte"], # 0
cast_pytorch_to_onnx["Char"], # 1
cast_pytorch_to_onnx["Short"], # 2
cast_pytorch_to_onnx["Int"], # 3
cast_pytorch_to_onnx["Long"], # 4
cast_pytorch_to_onnx["Half"], # 5
cast_pytorch_to_onnx["Float"], # 6
cast_pytorch_to_onnx["Double"], # 7
cast_pytorch_to_onnx["Undefined"], # 8
cast_pytorch_to_onnx["ComplexFloat"], # 9
cast_pytorch_to_onnx["ComplexDouble"], # 10
cast_pytorch_to_onnx["Bool"], # 11
cast_pytorch_to_onnx["Char"], # 12
cast_pytorch_to_onnx["Byte"], # 13
cast_pytorch_to_onnx["Int"], # 14
cast_pytorch_to_onnx["BFloat16"], # 15
]
# Global set to store the list of quantized operators in the network.
# This is currently only used in the conversion of quantized ops from PT -> C2 via ONNX.
_quantized_ops: Set[int] = set()
| pytorch-master | torch/onnx/symbolic_helper.py |
"""This file exports ONNX ops for opset 9.
Opset 9 is supported by ONNX release 1.4.1
released on 01/23/19
"""
import functools
import math
import sys
import warnings
from typing import List, Optional, Sequence, Tuple, Union
import torch
import torch._C._onnx as _C_onnx
import torch.nn.modules.utils
import torch.onnx
from torch import _C
# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics
from torch.onnx import _patch_torch, _type_utils, symbolic_helper # noqa: F401
from torch.onnx._exporter_states import (
SymbolicContext, # Special case class import for readability
)
from torch.onnx._globals import GLOBALS
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
# Note [Pointwise by scalar]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# What happens if you add a tensor with a constant (e.g., x + 2)? There are
# some moving parts to implementing the ONNX translation in this case:
#
# - By the time we get the scalar in a symbolic function here, it is no longer
# a Python long/float, but a PyTorch tensor with numel == 1 (eventually, we
# want it to be a zero dim tensor but this change has not happened yet.)
# However, the type of this scalar is *exactly* what the user wrote in
# Python, which may not match the tensor it is being added to. PyTorch
# will do implicit conversions on scalars; however, ONNX will not, so
# we must do the conversion ourselves. This is what _if_scalar_type_as
# does.
#
# - Dispatch to these functions takes advantage of an outrageous coincidence
# between the tensor and scalar name. When we add two tensors together,
# you get the dispatch:
#
# add(*[self, other], **{"alpha": alpha})
#
# When you add a tensor and a scalar, you get the dispatch:
#
# add(*[self], **{"other": other, "alpha": alpha})
#
# By having the argument name line up with the name of the scalar attribute
# if it exists, we can write a single function for both overloads.
#
__all__ = [
"abs",
"acos",
"adaptive_avg_pool1d",
"adaptive_avg_pool2d",
"adaptive_avg_pool3d",
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"add",
"addcmul",
"addmm",
"alias",
"alpha_dropout_",
"alpha_dropout",
"amax",
"amin",
"aminmax",
"arange",
"argmax",
"argmin",
"as_strided",
"as_tensor",
"asin",
"atan",
"avg_pool1d",
"avg_pool2d",
"avg_pool3d",
"baddbmm",
"batch_norm",
"bernoulli",
"bitwise_not",
"bmm",
"broadcast_tensors",
"bucketize",
"cat",
"cdist",
"ceil",
"clamp_max",
"clamp_min",
"clamp",
"clone",
"constant_pad_nd",
"contiguous",
"convolution",
"conv_tbc",
"conv_transpose1d",
"conv_transpose2d",
"conv_transpose3d",
"conv1d",
"conv2d",
"conv3d",
"cos",
"cosine_similarity",
"cross",
"cumsum",
"detach",
"dim",
"div",
"dot",
"dropout_",
"dropout",
"elu",
"embedding_bag",
"embedding",
"empty_like",
"empty",
"eq",
"erf",
"exp",
"expand_as",
"expand",
"eye",
"feature_alpha_dropout_",
"feature_alpha_dropout",
"feature_dropout_",
"feature_dropout",
"fill",
"flatten",
"floor_divide",
"floor",
"floordiv",
"frobenius_norm",
"full_like",
"full",
"gather",
"ge",
"gelu",
"get_pool_ceil_padding",
"glu",
"group_norm",
"gru",
"gt_impl",
"gt",
"hann_window",
"hardshrink",
"hardsigmoid",
"hardswish",
"hardtanh",
"index_add",
"index_copy",
"index_fill",
"index_put",
"index_select",
"index",
"instance_norm",
"is_floating_point",
"isnan",
"item",
"kl_div",
"layer_norm",
"le",
"leaky_relu",
"lerp",
"lift",
"linalg_cross",
"linalg_matrix_norm",
"linalg_norm",
"linalg_vector_norm",
"linear",
"linspace",
"log_sigmoid",
"log_softmax",
"log",
"log10",
"log1p",
"log2",
"logical_and",
"logical_or",
"logical_xor",
"logsumexp",
"lstm_cell",
"lstm",
"lt_impl",
"lt",
"masked_fill",
"matmul",
"max_pool1d_with_indices",
"max_pool1d",
"max_pool2d_with_indices",
"max_pool2d",
"max_pool3d_with_indices",
"max_pool3d",
"max",
"maximum",
"mean",
"meshgrid",
"min",
"minimum",
"mish",
"mm",
"movedim",
"mul",
"multinomial",
"mv",
"narrow",
"native_layer_norm",
"ne",
"neg",
"new_empty",
"new_full",
"new_ones",
"new_zeros",
"nonzero_numpy",
"nonzero",
"norm",
"numel",
"numpy_T",
"one_hot",
"ones_like",
"ones",
"Onnx",
"op_with_optional_float_cast",
"overload_by_arg_count",
"pad",
"pairwise_distance",
"permute",
"pixel_shuffle",
"pixel_unshuffle",
"pow",
"prelu",
"Prim",
"prod",
"rand_like",
"rand",
"randn_like",
"randn",
"reciprocal",
"reflection_pad",
"reflection_pad1d",
"reflection_pad2d",
"reflection_pad3d",
"relu",
"relu6",
"remainder",
"repeat_interleave",
"repeat",
"replication_pad",
"replication_pad1d",
"replication_pad2d",
"replication_pad3d",
"reshape_as",
"reshape",
"rnn_relu",
"rnn_tanh",
"roll",
"rrelu",
"rsqrt",
"rsub",
"scalar_tensor",
"scatter_add",
"scatter",
"select",
"selu",
"sigmoid",
"sign",
"silu",
"sin",
"size",
"slice",
"softmax",
"softplus",
"softshrink",
"sort",
"split_with_sizes",
"split",
"sqrt",
"square",
"squeeze",
"stack",
"std_mean",
"std",
"sub",
"sum",
"t",
"take",
"tan",
"tanh",
"tanhshrink",
"tensor",
"threshold",
"to",
"topk",
"transpose",
"true_divide",
"type_as",
"unbind",
"unfold",
"unsafe_chunk",
"unsafe_split_with_sizes",
"unsafe_split",
"unsqueeze",
"unused",
"upsample_bilinear2d",
"upsample_linear1d",
"upsample_nearest1d",
"upsample_nearest2d",
"upsample_nearest3d",
"upsample_trilinear3d",
"var_mean",
"var",
"view_as",
"view",
"where",
"wrap_logical_op_with_cast_to",
"wrap_logical_op_with_negation",
"zeros_like",
"zeros",
]
# used to represent "missing" optional inputs
def unused(g):
n = g.op("prim::Constant")
n.setType(_C.OptionalType.ofTensor())
return n
def _shape_as_tensor(g, input):
return g.op("Shape", input)
def _reshape_from_tensor(g, input, shape):
if isinstance(shape, list):
shape = g.op("Concat", *shape, axis_i=0)
return reshape(g, input, shape)
def reshape(g, self, shape):
return symbolic_helper._reshape_helper(g, self, shape)
def reshape_as(g, self, other):
shape = g.op("Shape", other)
return reshape(g, self, shape)
def add(g, self, other, alpha=None):
if symbolic_helper._is_value(self) and symbolic_helper._is_tensor_list(self):
return symbolic_helper._onnx_opset_unsupported_detailed(
"Add", 9, 11, "Add between list of tensors not supported"
)
if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1:
other = g.op("Mul", other, alpha)
return g.op("Add", self, other)
def sub(g, self, other, alpha=None):
if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1:
other = g.op("Mul", other, alpha)
return g.op("Sub", self, other)
def rsub(g, self, other, alpha=None):
return sub(g, other, self, alpha=alpha)
def mul(g, self, other):
if symbolic_helper._is_bool(self) and symbolic_helper._is_bool(other):
# ONNX Mul doesn't support Boolean, so use And as an equivalent operator.
return g.op("And", self, other)
else:
return g.op("Mul", self, other)
def div(g, self, other, *args):
if len(args) == 0:
return true_divide(g, self, other)
else:
return _div_rounding_mode(g, self, other, *args)
@symbolic_helper.parse_args("v", "v", "v", "f")
def addcmul(g, self, tensor1, tensor2, value=1.0):
value_tens = g.op("Constant", value_t=torch.tensor([value]))
return add(g, self, mul(g, mul(g, tensor1, tensor2), value_tens))
@symbolic_helper.parse_args("v", "v", "s")
def _div_rounding_mode(g, self, other, rounding_mode):
if rounding_mode is None:
return true_divide(g, self, other)
elif rounding_mode == "floor":
return _floor_divide(g, self, other)
elif rounding_mode == "trunc":
return _trunc_divide(g, self, other)
else:
raise RuntimeError(
f'Unsupported rounding mode: "{rounding_mode}". Expected None, "floor" or "trunc"'
)
def _trunc_divide(g, self, other):
out = g.op("Div", self, other)
# the correct operation is truncate, which is not supported in ONNX,
# we cannot call floor since it will behave differently for negative numbers
# (eg. -0.1 should become -0 )
    # - if scalar_type information is not available, assume that
# we need to call floor (treat as float)
out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.INT64)
# Matching PyTorch's behavior:
# - if self is fp the output's type is self's type
# - if self is not fp and other is fp, the output is of type "Float"
# - self is not fp and other is not fp, the output's type is self's output type
# - the output type defaults to Float
scalar_type = self.type().scalarType()
if scalar_type is not None:
if (
not symbolic_helper._is_fp(self)
and other.type().scalarType() is not None
and symbolic_helper._is_fp(other)
):
out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT)
else:
out = g.op(
"Cast",
out,
to_i=_type_utils.JitScalarType.from_name(scalar_type).onnx_type(),
)
else:
out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT)
return out
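# Rough check of the truncation behavior emulated above:
#   >>> torch.div(torch.tensor(-7.0), torch.tensor(2.0), rounding_mode="trunc")
#   tensor(-3.)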
def _floor_divide(g, self, other):
if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other):
out = true_divide(g, self, other)
return g.op("Floor", out)
else:
        # Integer division does truncation rounding
div = g.op("Div", self, other)
# Division is negative if: self < 0 != other < 0
zero = g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64))
negative = g.op(
"Xor",
symbolic_helper._lt_helper(g, self, zero),
symbolic_helper._lt_helper(g, other, zero),
)
# For negative numbers with self % other != 0, subtract 1 to round down instead of up
mod = g.op("Sub", self, g.op("Mul", div, other))
fixup_mask = g.op("And", negative, g.op("Not", g.op("Equal", mod, zero)))
one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
fixup = g.op("Mul", fixup_mask, one)
return g.op("Sub", div, fixup)
def floor_divide(g, self, other):
# Deprecated behavior, floor_divide actually truncates
return _trunc_divide(g, self, other)
def floordiv(g, self, other):
return floor_divide(g, self, other)
def true_divide(g, self, other):
"""Division where both inputs are cast to floating types
If both inputs are floating, performs div as usual
If only one input is a floating type, the other input is cast to its type
If neither input is a floating type, both inputs are cast to the default scalar type
"""
# Case 1: either values are floating
# Performs div as usual.
# Implicit casting will be handled in scalar type analysis pass.
if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other):
return g.op("Div", self, other)
# Case 2: neither is floating
# Casts both inputs to the default scalar type
scalar_type = torch.get_default_dtype()
onnx_scalar_type = _C_onnx.TensorProtoDataType.FLOAT
assert scalar_type is torch.float or scalar_type is torch.double
if torch.get_default_dtype() is torch.double:
onnx_scalar_type = _C_onnx.TensorProtoDataType.DOUBLE
self = g.op("Cast", self, to_i=onnx_scalar_type)
other = g.op("Cast", other, to_i=onnx_scalar_type)
return g.op("Div", self, other)
def reciprocal(g, self):
# torch.reciprocal implicitly casts to float, so we do the same.
if not symbolic_helper._is_fp(self):
self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT)
return g.op("Reciprocal", self)
@symbolic_helper.parse_args("v", "i")
def cat(g, tensor_list, dim):
tensors = symbolic_helper._unpack_list(tensor_list)
return g.op("Concat", *tensors, axis_i=dim)
@symbolic_helper.parse_args("v", "i")
def stack(g, tensor_list, dim):
unsqueezed = [
symbolic_helper._unsqueeze_helper(g, t, [dim])
for t in symbolic_helper._unpack_list(tensor_list)
]
return g.op("Concat", *unsqueezed, axis_i=dim)
def _list(g, self):
return self
def mm(g, self, other):
    # Create a dummy C tensor. Only needed for API purposes; the value is
    # not used since beta = 0
C = g.op("Constant", value_t=torch.tensor([1]))
return g.op("Gemm", self, other, C, beta_f=0.0, alpha_f=1.0)
def bmm(g, self, other):
return g.op("MatMul", self, other)
def matmul(g, self, other):
return g.op("MatMul", self, other)
@symbolic_helper.parse_args("v", "v", "v", "t", "t")
def addmm(g, self, mat1, mat2, beta, alpha):
dtype = None
self_dtype = symbolic_helper._try_get_scalar_type(self)
mat1_dtype = symbolic_helper._try_get_scalar_type(mat1)
mat2_dtype = symbolic_helper._try_get_scalar_type(mat2)
if self_dtype is not None:
dtype = self_dtype
elif mat1_dtype is not None:
dtype = mat1_dtype
elif mat2_dtype is not None:
dtype = mat2_dtype
mat1_rank = symbolic_helper._get_tensor_rank(mat1)
mat2_rank = symbolic_helper._get_tensor_rank(mat2)
def isNotNoneAnd(v, u):
return v is not None and v != u
if dtype is not None and (isNotNoneAnd(mat1_rank, 2) or isNotNoneAnd(mat2_rank, 2)):
scalar_type = _type_utils.JitScalarType.from_name(dtype)
res1 = g.op("MatMul", mat1, mat2)
res2 = self
alpha = symbolic_helper._scalar(alpha)
beta = symbolic_helper._scalar(beta)
if alpha != 1:
alpha = g.op(
"Constant", value_t=torch.tensor(alpha, dtype=scalar_type.dtype())
)
res1 = g.op("Mul", res1, alpha)
if beta != 1:
beta = g.op(
"Constant",
value_t=torch.tensor(
symbolic_helper._scalar(beta), dtype=scalar_type.dtype()
),
)
res2 = g.op("Mul", res2, beta)
return g.op("Add", res1, res2)
return g.op(
"Gemm",
mat1,
mat2,
self,
beta_f=symbolic_helper._scalar(beta),
alpha_f=symbolic_helper._scalar(alpha),
)
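# The decomposition above follows the aten semantics
# addmm(self, mat1, mat2, beta, alpha) == beta * self + alpha * (mat1 @ mat2);
# rough check:
#   >>> torch.addmm(torch.zeros(2, 2), torch.eye(2), torch.eye(2), beta=1, alpha=2)
#   tensor([[2., 0.],
#           [0., 2.]])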
def neg(g, self):
return g.op("Neg", self)
def sqrt(g, self):
return g.op("Sqrt", self)
def rsqrt(g, self):
return g.op(
"Div", symbolic_helper._if_scalar_type_as(g, torch.ones(1), self), sqrt(g, self)
)
def tanh(g, self):
return g.op("Tanh", self)
def sin(g, self):
return g.op("Sin", self)
def cos(g, self):
return g.op("Cos", self)
def tan(g, self):
return g.op("Tan", self)
def asin(g, self):
return g.op("Asin", self)
def acos(g, self):
return g.op("Acos", self)
def atan(g, self):
return g.op("Atan", self)
# Fixed scale and zero_point, discovered from aten/src/ATen/native/quantized/cpu/qsigmoid.cpp
@symbolic_helper.quantized_args(True, scale=1.0 / 256.0, zero_point=0)
def sigmoid(g, self):
return g.op("Sigmoid", self)
def sign(g, self):
return g.op("Sign", self)
def _slice(g, input, axes, starts, ends):
assert len(starts) == len(ends)
if len(starts) == 1 and starts[0] == 0 and ends[0] == 9223372036854775807:
return input
return g.op("Slice", input, axes_i=axes, starts_i=starts, ends_i=ends)
def _maybe_cast_reduce_op_input(g, self):
dtype = self.type().scalarType()
# This check only covers traced modules where dtype is present
if dtype is not None:
# pytorch reduce-ops cast all other integral types to int64
if not symbolic_helper._is_fp(self) and not (dtype == "Long"):
self = _cast_Long(g, self, False) # type: ignore[name-defined]
return self
def _reduce_op_symbolic(onnx_op_name, allow_multi_dim_support=True):
def symbolic(g, self, dim=None, keepdim=None):
self = _maybe_cast_reduce_op_input(g, self)
if dim is None:
# all-reduce path
return symbolic_helper._handle_reduce_dim_none(g, self, onnx_op_name)
else:
# dim-reduce path
desc = "is" if allow_multi_dim_support else "i"
dim, keepdim = symbolic_helper._get_const(
dim, desc, "dim"
), symbolic_helper._get_const(keepdim, "i", "keepdim")
dim_list = dim if allow_multi_dim_support else [dim]
return g.op(onnx_op_name, self, axes_i=dim_list, keepdims_i=keepdim)
return symbolic
def overload_by_arg_count(fn):
@functools.wraps(fn)
def wrapper(g, *args):
overloads = fn(g, *args)
last_exception = None
for overload in overloads:
arg_descriptors = overload._arg_descriptors
if len(arg_descriptors) == len(args):
return overload(g, *args)
raise NotImplementedError(f"Unknown aten::{fn.__name__} signature")
return wrapper
def _reduce_with_dtype(onnx_op, name, allow_multi_dim_support=True):
symbolic = _reduce_op_symbolic(
onnx_op, allow_multi_dim_support=allow_multi_dim_support
)
@overload_by_arg_count
def reduce(g, *args, **kwargs):
@symbolic_helper.parse_args("v", "none")
def reduce_nodim(g, self, dtype):
if dtype.node().kind() == "onnx::Constant":
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
self = g.op(
"Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()
)
elif dtype.node().kind() != "prim::Constant":
return symbolic_helper._unimplemented(name, "dtype")
return symbolic(g, self)
dim_desc = "is" if allow_multi_dim_support else "i"
@symbolic_helper.parse_args("v", dim_desc, "i", "none") # type: ignore[arg-type]
def reduce_dim(g, self, dim, keepdim, dtype):
if dtype.node().kind() == "onnx::Constant":
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
self = g.op(
"Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()
)
elif dtype.node().kind() != "prim::Constant":
return symbolic_helper._unimplemented(name, "dtype")
return symbolic(g, self, dim, keepdim)
return reduce_nodim, reduce_dim
return reduce
sum = _reduce_with_dtype("ReduceSum", "sum")
mean = _reduce_with_dtype("ReduceMean", "mean")
# torch.prod does not support multidimensional "dim"
prod = _reduce_with_dtype("ReduceProd", "prod", allow_multi_dim_support=False)
@symbolic_helper.parse_args("v", "i", "none")
def cumsum(g, input, dim, dtype):
if symbolic_helper.is_caffe2_aten_fallback():
if dtype.node().kind() != "prim::Constant":
return symbolic_helper._unimplemented("cumsum", "dtype")
return g.at("cumsum", input, dim_i=dim)
else:
symbolic_helper._onnx_opset_unsupported("cumsum", 9, 11)
def _sample_dirichlet(g, self, generator):
if symbolic_helper.is_caffe2_aten_fallback():
if not symbolic_helper._is_none(generator):
return symbolic_helper._unimplemented(
"_sample_dirichlet", "We are not able to export generator"
)
return g.at("_sample_dirichlet", self)
else:
return symbolic_helper._onnx_unsupported("_sample_dirichlet")
def _standard_gamma(g, self, generator):
if symbolic_helper.is_caffe2_aten_fallback():
if not symbolic_helper._is_none(generator):
return symbolic_helper._unimplemented(
"_standard_gamma", "We are not able to export generator"
)
return g.at("_standard_gamma", self)
else:
return symbolic_helper._onnx_unsupported("_standard_gamma")
def t(g, self):
return g.op("Transpose", self, perm_i=(1, 0))
def numpy_T(g, input):
ndim = symbolic_helper._get_tensor_rank(input)
assert ndim is not None
perm = list(reversed(range(0, ndim)))
return g.op("Transpose", input, perm_i=perm)
def expand(g, self, size, implicit):
size = symbolic_helper._maybe_get_const(size, "is")
if not symbolic_helper._is_value(size):
size = g.op("Constant", value_t=torch.LongTensor(size))
elif symbolic_helper._is_packed_list(size):
# Expand with -1 dim value means dim is unchanged.
# Since onnx::expand supports two-way broadcasting,
# -1 dim value can be exported to onnx as 1
size = symbolic_helper._reshape_helper(
g, stack(g, size, 0), g.op("Constant", value_t=torch.tensor([-1]))
)
dtype = _type_utils.JitScalarType.INT64
ones = ones_like(g, size, dtype)
neg_ones = mul(g, ones, g.op("Constant", value_t=torch.tensor(-1)))
size = where(g, g.op("Equal", size, neg_ones), ones, size)
return g.op("Expand", self, size)
def expand_as(g, self, other):
self_t = symbolic_helper._maybe_get_const(self, "t")
if isinstance(self_t, torch.Tensor):
orig_type = self_t.dtype
self_t = self_t.to(torch.double)
dims = []
for d in range(self_t.dim()):
if torch.equal(self_t.mean(d).unsqueeze(d).expand_as(self_t), self_t):
dims.append(d)
self = g.op("Constant", value_t=self_t.mean(dims).to(orig_type))
shape = g.op("Shape", other)
return g.op("Expand", self, shape)
@symbolic_helper.parse_args("v", "v", "i", "b", "v")
def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):
if scale_grad_by_freq and GLOBALS.export_training:
raise RuntimeError(
"Unsupported: ONNX export of embedding with scale_grad_by_freq=True "
"for training mode. ONNX does not support scaling the gradients."
)
if padding_idx >= 0 and GLOBALS.export_training:
warnings.warn(
"Warning: ONNX export of embedding with padding_idx >= 0 "
"for training mode. "
"ONNX does not support not updating the embedding vector at padding_idx during training."
)
return g.op("Gather", weight, indices)
@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i", "v", "i", "i")
def embedding_bag(
g,
embedding_matrix,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
):
if not symbolic_helper._is_none(per_sample_weights):
return symbolic_helper._onnx_unsupported(
"embedding_bag with per_sample_weights"
)
if symbolic_helper.is_caffe2_aten_fallback():
return g.at(
"embedding_bag",
embedding_matrix,
indices,
offsets,
outputs=4,
scale_grad_by_freq_i=scale_grad_by_freq,
mode_i=mode,
sparse_i=sparse,
include_last_offset_i=include_last_offset,
padding_idx_i=padding_idx,
)
else:
return symbolic_helper._onnx_unsupported("embedding_bag")
def size(g, self, dim=None):
if dim is None:
return g.op("Shape", self)
if symbolic_helper._maybe_get_const(dim, "i") < 0:
rank = symbolic_helper._get_tensor_rank(self)
if rank is not None:
dim = symbolic_helper._maybe_get_const(dim, "i") + rank
dim = g.op("Constant", value_t=torch.tensor(dim))
return symbolic_helper._size_helper(g, self, dim)
@symbolic_helper.parse_args("v", "i", "i")
def transpose(g, self, dim0, dim1):
if dim0 == dim1: # micro-optimization
return self
# NB: Transpose in ONNX is actually a Permute
rank = symbolic_helper._get_tensor_rank(self)
if rank is not None:
axes = list(range(rank))
axes[dim0], axes[dim1] = axes[dim1], axes[dim0]
return g.op("Transpose", self, perm_i=axes)
else:
# if we don't have dim information we cannot
# output a permute so use ATen instead
if symbolic_helper.is_caffe2_aten_fallback():
return g.at(
"transpose", self, overload_name="int", dim0_i=dim0, dim1_i=dim1
)
else:
raise RuntimeError(
"Unsupported: ONNX export of transpose for tensor " "of unknown rank."
)
@symbolic_helper.parse_args("v", "is")
def permute(g, self, dims):
if dims == list(range(0, len(dims))):
return self
return g.op("Transpose", self, perm_i=dims)
def view(g, self, size):
return reshape(g, self, size)
def view_as(g, self, other):
shape = g.op("Shape", other)
return reshape(g, self, shape)
@symbolic_helper.parse_args("v", "i", "i", "i")
def unsafe_chunk(g, self, chunks, dim, _outputs=None):
if _outputs is None:
return symbolic_helper._onnx_opset_unsupported_detailed(
"unsafe_chunk", 9, 11, "Dynamic number of outputs not supported"
)
size = symbolic_helper._get_tensor_dim_size(self, dim)
if size is None:
return symbolic_helper._unimplemented("unsafe_chunk", "unknown dimension size")
split_size = (size + chunks - 1) // chunks
splits = [split_size] * (size // split_size)
leftover = size % split_size
if leftover:
splits.append(leftover)
return g.op("Split", self, split_i=splits, axis_i=dim, outputs=_outputs)
@symbolic_helper.parse_args("v", "v", "v", "i")
def split(g, self, split_size_or_sizes, dim, _outputs=None):
if not symbolic_helper._is_split_static(split_size_or_sizes, _outputs):
return symbolic_helper._onnx_opset_unsupported_detailed(
"split", 9, 11, "Dynamic number of outputs not supported"
)
split_val = symbolic_helper._node_get(split_size_or_sizes.node(), "value")
if split_val.dim() > 0:
return split_with_sizes(g, self, split_size_or_sizes, dim, _outputs)
split_size = symbolic_helper._get_const(split_size_or_sizes, "i", "split_size")
dim = symbolic_helper._get_const(dim, "i", "dim")
size = symbolic_helper._get_tensor_dim_size(self, dim)
if size is None:
if _outputs is not None:
size = split_size * _outputs
else:
return symbolic_helper._onnx_opset_unsupported_detailed(
"split", 9, 11, "Unknown dimension size not supported"
)
splits = [split_size] * (size // split_size)
leftover = size % split_size
if leftover:
splits.append(leftover)
return g.op("Split", self, split_i=splits, axis_i=dim, outputs=_outputs)
def unsafe_split(g, self, split_size_or_sizes, dim, _outputs=None):
return split(g, self, split_size_or_sizes, dim, _outputs)
@symbolic_helper.parse_args("v", "is", "i", "i")
def split_with_sizes(g, self, split_sizes, dim, _outputs=None):
if not symbolic_helper._is_split_static(split_sizes, _outputs):
return symbolic_helper._onnx_opset_unsupported_detailed(
"split_with_sizes", 9, 11, "Dynamic number of outputs not supported"
)
return g.op("Split", self, split_i=split_sizes, axis_i=dim, outputs=_outputs)
def unsafe_split_with_sizes(g, self, split_sizes, dim, _outputs=None):
return split_with_sizes(g, self, split_sizes, dim, _outputs)
@symbolic_helper.parse_args("v", "i", "i")
def unbind(g, self, dim=0, _outputs=None):
if _outputs is None:
return symbolic_helper._onnx_opset_unsupported_detailed(
"unbind", 9, 11, "Dynamic number of outputs not supported"
)
outputs = g.op("Split", self, split_i=[1] * _outputs, axis_i=dim, outputs=_outputs)
outputs = [outputs] if _outputs == 1 else outputs
squeezed_outputs = [
symbolic_helper._squeeze_helper(g, out, [dim]) for out in outputs
]
return squeezed_outputs
@symbolic_helper.parse_args("v", "i", "v")
def select(g, self, dim, index):
index = symbolic_helper._maybe_get_scalar(index)
if (not symbolic_helper._is_value(index)) and (index < 0):
if index == -1:
end_index = 9223372036854775807
else:
end_index = index + 1
slice_node = symbolic_helper._slice_helper(
g, self, axes=[dim], starts=[index], ends=[end_index]
)
return symbolic_helper._squeeze_helper(g, slice_node, [dim])
else:
return g.op("Gather", self, index, axis_i=dim)
def square(g, self):
return g.op("Mul", self, self)
def squeeze(g, self, dim=None):
if dim is None:
return g.op("Squeeze", self)
squeeze_dim = symbolic_helper._get_const(dim, "i", "dim")
# Handle negative dims
if squeeze_dim < 0:
rank = symbolic_helper._get_tensor_rank(self)
if rank is not None:
warnings.warn(
"ONNX export squeeze with negative axis "
+ str(squeeze_dim)
+ " might cause the onnx model to be incorrect. "
+ "Negative axis is not supported in ONNX. "
+ "Axis is converted to "
+ str(squeeze_dim + rank)
+ " based on input shape at export time. "
+ "Passing an tensor of different rank in execution will be incorrect."
)
squeeze_dim += rank
else:
return symbolic_helper._unimplemented(
"squeeze", "negative axis with unknown input rank"
)
dim_size = symbolic_helper._get_tensor_dim_size(self, squeeze_dim)
if dim_size is None:
warnings.warn(
"This model contains a squeeze operation on dimension "
+ str(squeeze_dim)
+ " on an input "
+ "with unknown shape. Note that if the size of dimension "
+ str(squeeze_dim)
+ " of the input "
+ "is not 1, the ONNX model will return an error. Opset version 11 supports squeezing on "
+ "non-singleton dimensions, it is recommended to export this model using opset "
+ "version 11 or higher."
)
return symbolic_helper._squeeze_helper(g, self, axes_i=[squeeze_dim])
if dim_size > 1:
warnings.warn(
"This model contains a squeeze operation on dimension "
+ str(squeeze_dim)
+ ". The size of "
+ "this dimension in the given input is "
+ str(dim_size)
+ ". The model will "
+ "be exported without the squeeze node. If the model is intended to be used with dynamic "
+ "input shapes, please use opset version 11 to "
+ "export the model."
)
return self
warnings.warn(
"This model contains a squeeze operation on dimension "
+ str(squeeze_dim)
+ ". If the model is "
+ "intended to be used with dynamic input shapes, please use opset version 11 to export the model."
)
return symbolic_helper._squeeze_helper(g, self, axes_i=[squeeze_dim])
def prelu(g, self, weight):
self_rank = symbolic_helper._get_tensor_rank(self)
weight_sizes = symbolic_helper._get_tensor_sizes(weight)
weight_rank = len(weight_sizes)
if self_rank is not None:
if self_rank > 2:
# make weight unidirectional broadcastable
weight = symbolic_helper._unsqueeze_helper(
g, weight, list(range(1, self_rank - 1))
)
elif self_rank == 0 and weight_sizes == [1]:
# self and weight are both scalar but weight has rank == 1, squeeze weight.
weight = symbolic_helper._squeeze_helper(g, weight, [0])
weight_rank = 0
if self_rank is not None and weight_rank is not None:
assert (
self_rank >= weight_rank
), f"rank(x) should be >= rank(slope) but got {self_rank} < {weight_rank}"
return g.op("PRelu", self, weight)
def silu(g, input):
return g.op("Mul", input, g.op("Sigmoid", input))
def mish(g, input):
return g.op("Mul", input, g.op("Tanh", g.op("Softplus", input)))
def op_with_optional_float_cast(g, op_name, *args, **kwargs):
"""Some PyTorch operators (e.g., Clip/Min/ReLU/Pad) are super set of ONNX in terms of data types.
This function maximizes the exportability of PyTorch-ONNX by allowing ONNX-unsupported PyTorch
operator data type. For example, `Cast<int>(Clip<float>(Cast<float>(INPUT)))` can be used to mimic
`Clip<int>(INPUT)` (opset version < 12).
Args:
g (torch._C.Graph): graph to write the ONNX representation into.
op_name (str): operator name in ONNX.
*args (tuple): operands to the operator.
**kwargs (dict): attributes to the operator along with "opset_before" (optional, None by default)
indicating the smallest opset version to trigger such casting behavior and "target_float_t"
(optional, "Float" by default) indicating the data type of internal operator.
Returns:
Optional[torch._C.Value, Tuple[torch._C.Value, ...]]: output(s) of the operator.
"""
opset_before = kwargs.pop("opset_before", None)
target_float_t = kwargs.pop("target_float_t", "Float")
inputs = list(args)
dtype_0 = inputs[0].type().scalarType()
require_cast = not symbolic_helper._is_fp(inputs[0]) and (
opset_before is None or GLOBALS.export_onnx_opset_version < opset_before
)
if require_cast:
for input in inputs:
if input.isCompleteTensor() and input.type().scalarType() != dtype_0:
raise RuntimeError(
f"Inputs of {op_name} must have same dtype. Got {dtype_0} and {input.type().scalarType()}"
)
for i, input in enumerate(inputs):
if input.isCompleteTensor() and not symbolic_helper._is_fp(input):
inputs[i] = g.op(
"Cast",
input,
to_i=_type_utils.JitScalarType.from_name(
target_float_t
).onnx_type(),
)
self = g.op(op_name, *inputs, **kwargs)
if require_cast:
self = g.op(
"Cast", self, to_i=_type_utils.JitScalarType.from_name(dtype_0).onnx_type()
)
return self
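# A hedged usage sketch (assumed dtype and opset, not an exhaustive reference): exporting relu on an
# INT32 tensor at an opset where onnx::Relu accepts only floating-point inputs (hence the
# opset_before=14 below) produces Cast<INT32>(Relu(Cast<FLOAT>(input))), i.e. the integer input is
# routed through the float operator and cast back to its original type.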
@symbolic_helper.quantized_args(True)
def relu(g, input):
return op_with_optional_float_cast(g, "Relu", input, opset_before=14)
@symbolic_helper.quantized_args(True)
def relu6(g, input):
relu = op_with_optional_float_cast(g, "Relu", input, opset_before=14)
return clamp_max(g, relu, 6)
def ceil(g, input):
return g.op("Ceil", input)
def floor(g, input):
return g.op("Floor", input)
def _len(g, self):
sz_0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0])))
return symbolic_helper._squeeze_helper(g, sz_0, [0])
@symbolic_helper.parse_args("v", "t", "t")
def threshold(g, self, threshold, value):
# See Note [Export inplace]
if symbolic_helper._scalar(threshold) != 0:
return symbolic_helper._unimplemented("threshold", "non-zero threshold")
if symbolic_helper._scalar(value) != 0:
return symbolic_helper._unimplemented("threshold", "non-zero value")
return g.op("Relu", self)
def leaky_relu(g, input, negative_slope, inplace=False):
negative_slope = symbolic_helper._get_const(negative_slope, "t", "negative_slope")
# See Note [Export inplace]
# TODO: Talk to ONNX about unconditional cast of scalar to float
return g.op("LeakyRelu", input, alpha_f=symbolic_helper._scalar(negative_slope))
@symbolic_helper.parse_args("v", "i")
def glu(g, input, dim):
dim_size = symbolic_helper._get_tensor_dim_size(input, dim)
if dim_size is not None:
assert dim_size % 2 == 0
first, second = g.op("Split", input, axis_i=dim, outputs=2)
return g.op("Mul", first, g.op("Sigmoid", second))
@symbolic_helper.parse_args("v", "i", "none")
def softmax(g, input, dim, dtype=None):
# Softmax does normalization at vector level.
# PyTorch and ONNX use different strategies to split the input tensor into vectors.
# Thus dim and axis have different meanings.
# PyTorch slices the input tensor into vectors along the `dim`-th dimension.
# ONNX reshapes the input into a 2-D tensor, and `axis` indicates where the input is coerced.
# If input is a 2 x 3 tensor:
# input = [[1.0, 1.0, 1.0],
    #          [1.0, 1.0, 1.0]]
# with dim = 0, the result is:
# result = [[0.5, 0.5, 0.5],
# [0.5, 0.5, 0.5]]
# with axis = 0, the result is:
# result = [[0.167, 0.167, 0.167],
# [0.167, 0.167, 0.167]]
# So only when dim and axis both equal to ndim - 1 (the last dimension),
# their semantics are equivalent.
# So use softmax when dim and axis both equal to ndim - 1,
# otherwise transpose the input to put the vectors to be normalized to the last dimension.
# When input rank is not known at export time we compute softmax using a subgraph
# with other operators
input_dim = symbolic_helper._get_tensor_rank(input)
if input_dim is not None:
# TODO: remove this as onnx opset 11 spec allows negative axes
if dim < 0:
dim = input_dim + dim
is_transpose_required = input_dim != dim + 1
if is_transpose_required:
axes = list(range(input_dim))
axes[dim], axes[-1] = axes[-1], axes[dim]
input = g.op("Transpose", input, perm_i=axes)
dim = input_dim - 1
softmax = g.op("Softmax", input, axis_i=dim)
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype")
softmax = g.op(
"Cast",
softmax,
to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type(),
)
if is_transpose_required:
softmax = g.op("Transpose", softmax, perm_i=axes)
return softmax
# Apply max normalization.
input = g.op("Sub", input, g.op("ReduceMax", input, axes_i=[dim], keepdims_i=1))
exp = g.op("Exp", input)
sum = symbolic_helper._reducesum_helper(g, exp, axes_i=[dim])
softmax = g.op("Div", exp, sum)
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype")
softmax = g.op(
"Cast", softmax, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type()
)
return softmax
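# Note on the unknown-rank fallback above: the emitted ReduceMax/Sub/Exp/ReduceSum/Div subgraph
# computes the numerically stable identity softmax(x) = exp(x - max(x)) / sum(exp(x - max(x)))
# along `dim`, which is mathematically equal to exp(x) / sum(exp(x)).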
def softplus(g, self, beta, threshold):
beta_const = symbolic_helper._maybe_get_const(beta, "f")
if beta_const != 1:
return g.op("Div", g.op("Softplus", g.op("Mul", self, beta)), beta)
return g.op("Softplus", self)
def get_pool_ceil_padding(input, kernel_size, stride, padding):
sizes = symbolic_helper._get_tensor_sizes(input)
dim = sizes[-len(padding) :] if sizes is not None else None
if dim is None or any([i is None for i in dim]):
return symbolic_helper._unimplemented(
"get_pool_ceil_padding", "input size not accessible"
)
ceiled_output_dim = [
int(math.ceil((dim[i] + 2 * padding[i] - kernel_size[i]) / float(stride[i])))
+ 1
for i in range(0, len(padding))
]
# ensure last pooling starts inside
ceiled_output_dim = [
ceiled_output_dim[i] - 1
if (((ceiled_output_dim[i] - 1) * stride[i]) >= (dim[i] + padding[i]))
else ceiled_output_dim[i]
for i in range(0, len(ceiled_output_dim))
]
padding_ceil = [
0
if (stride[i] == 1)
else (
kernel_size[i]
- (dim[i] + 2 * padding[i] - ((ceiled_output_dim[i] - 1) * stride[i] + 1))
)
for i in range(0, len(padding))
]
# ensure padding is not > kernel_size
padding_ceil = [
(
int(padding_ceil[i])
if padding_ceil[i] < kernel_size[i] - 1
else int(kernel_size[i] - 1)
)
if ((padding_ceil[i] + 2 * padding[i]) >= (kernel_size[i]))
else int(padding_ceil[i])
for i in range(0, len(padding_ceil))
]
return padding_ceil
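# A worked example with assumed sizes: for an input dim of 6, kernel 3, stride 2, padding 0 and
# ceil_mode=True, PyTorch produces 3 output positions. get_pool_ceil_padding returns [2], so the
# pool below is emitted with pads (0, 2) on that axis and floor((6 + 0 + 2 - 3) / 2) + 1 = 3
# output positions, matching the ceil-mode result.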
def _max_pool(name, tuple_fn, ndims, return_indices):
@symbolic_helper.quantized_args(True, False, False, False, False, False)
@symbolic_helper.parse_args("v", "is", "is", "is", "is", "i")
def symbolic_fn(g, input, kernel_size, stride, padding, dilation, ceil_mode):
if set(tuple_fn(dilation)) != {1}:
return symbolic_helper._unimplemented(name, "dilation")
if not stride:
stride = kernel_size
padding = tuple(tuple_fn(padding))
if ceil_mode:
padding_ceil = get_pool_ceil_padding(input, kernel_size, stride, padding)
padding = padding + tuple(a + b for (a, b) in zip(padding_ceil, padding))
else:
padding = padding * 2
kwargs = {
"kernel_shape_i": tuple_fn(kernel_size),
"pads_i": padding,
"strides_i": tuple_fn(stride),
}
        # An easy but hacky way to get flattened index values
        # that are then used to convert the returned indices to non-flattened form.
        # In ONNX the indices are computed as a flattened 1-D tensor,
        # so the values in indices are in [0, N x C x D1 x ... x Dn).
        # To convert the indices to the same format used by PyTorch,
        # we first execute a maxpool with a kernel and stride of 1 on the same input.
        # This results in a tensor of indices in which each output element holds its own index.
        # Using this tensor as a reference, we extract the first index of each axis and subtract
        # it from each index of this axis in the indices to convert.
        # This step results in a tensor where each dimension has values of indices within
# the dimension it is in.
# For more information :
# https://github.com/pytorch/pytorch/pull/16455#issuecomment-460776407
if return_indices:
r, indices = g.op("MaxPool", input, outputs=2, **kwargs)
_, flattened_indices = g.op(
"MaxPool",
input,
outputs=2,
kernel_shape_i=[1 for _ in range(ndims)],
strides_i=[1 for _ in range(ndims)],
)
# convert indices to have non-flattened indices values
s = symbolic_helper._slice_helper(
g,
flattened_indices,
axes=[2 + i for i in range(ndims)],
starts=tuple_fn(0),
ends=tuple_fn(1),
)
indices = sub(g, indices, s)
return r, indices
else:
r = g.op("MaxPool", input, outputs=1, **kwargs)
return r
return symbolic_fn
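# A numeric sketch of the index conversion above (assumed shape [N=1, C=2, D=4]): ONNX returns the
# flattened index n*C*D + c*D + d, so element (0, 1, 2) gets index 6. The kernel/stride-1 reference
# pool yields 4 at (0, 1, 0) (the first index along that axis); subtracting gives 2, the
# within-dimension index that PyTorch's max_pool1d_with_indices returns.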
max_pool1d = _max_pool(
"max_pool1d", torch.nn.modules.utils._single, 1, return_indices=False
)
max_pool2d = _max_pool(
"max_pool2d", torch.nn.modules.utils._pair, 2, return_indices=False
)
max_pool3d = _max_pool(
"max_pool3d", torch.nn.modules.utils._triple, 3, return_indices=False
)
max_pool1d_with_indices = _max_pool(
"max_pool1d_with_indices",
torch.nn.modules.utils._single,
1,
return_indices=True,
)
max_pool2d_with_indices = _max_pool(
"max_pool2d_with_indices",
torch.nn.modules.utils._pair,
2,
return_indices=True,
)
max_pool3d_with_indices = _max_pool(
"max_pool3d_with_indices",
torch.nn.modules.utils._triple,
3,
return_indices=True,
)
def _avg_pool(name, tuple_fn):
@symbolic_helper.quantized_args(True)
@symbolic_helper.parse_args("v", "is", "is", "is", "i", "i", "none")
def symbolic_fn(
g,
input: _C.Value,
kernel_size: Tuple[int, ...],
stride: Tuple[int, ...],
padding: Union[int, Tuple[int, ...]],
ceil_mode: int,
count_include_pad: int,
divisor_override=None,
):
if not stride:
stride = kernel_size
padding = symbolic_helper._avgpool_helper(
tuple_fn, padding, kernel_size, stride, divisor_override, name
)
adjusted_padding = padding
if count_include_pad:
input = g.op(
"Pad",
input,
pads_i=((0,) * 2 + padding) * 2,
mode_s="constant",
value_f=0.0,
)
adjusted_padding = (0,) * len(padding)
if ceil_mode:
padding_ceil = get_pool_ceil_padding(input, kernel_size, stride, padding)
adjusted_padding = adjusted_padding + tuple(
a + b for (a, b) in zip(padding_ceil, adjusted_padding)
)
else:
adjusted_padding = adjusted_padding * 2
output = g.op(
"AveragePool",
input,
kernel_shape_i=tuple_fn(kernel_size),
strides_i=tuple_fn(stride),
pads_i=adjusted_padding,
)
return output
return symbolic_fn
avg_pool1d = _avg_pool("avg_pool1d", torch.nn.modules.utils._single)
avg_pool2d = _avg_pool("avg_pool2d", torch.nn.modules.utils._pair)
avg_pool3d = _avg_pool("avg_pool3d", torch.nn.modules.utils._triple)
def _adaptive_pool(name, type, tuple_fn, fn=None):
@symbolic_helper.quantized_args(True, False)
def symbolic_fn(g, input, output_size):
# _adaptive_pool is supported for cases where output_size is 1 for all dimensions,
# by executing a GlobalPool.
# It is also supported for cases where the output size is a factor of the input size.
# For these cases the stride and kernel size are uniform along all the indices of
# the same dimension, which makes it possible to export it to ONNX.
# for MaxPool, GlobalMaxPool does not return indices,
# so we try using max_poolxd_with_indices, and if it is not possible
# (input is not a complete tensor or output size not factor of input size)
        # then we fall back to GlobalMaxPool and return None for the indices
try:
output_size = symbolic_helper._parse_arg(output_size, "is")
except Exception:
# FIXME(justinchuby): Avoid catching Exception.
# Catch a more specific exception instead.
return symbolic_helper._onnx_unsupported(
"adaptive pooling, since output_size is not constant."
)
if output_size == [1] * len(output_size) and type == "AveragePool":
return g.op("GlobalAveragePool", input)
sizes = symbolic_helper._get_tensor_sizes(input)
try:
dim = sizes[2:]
except Exception:
# FIXME(justinchuby): Avoid catching Exception.
# Catch a more specific exception instead.
dim = None
if dim is None or any([i is None for i in dim]):
if output_size == [1] * len(output_size):
return g.op("GlobalMaxPool", input), None
return symbolic_helper._unimplemented(name, "input size not accessible")
        # verify that input size % output size == 0 for every spatial dim
mod = [dim[i] % output_size[i] for i in range(0, len(dim))]
if mod != [0] * len(mod):
if output_size == [1] * len(output_size):
return g.op("GlobalMaxPool", input), None
return symbolic_helper._unimplemented(
name, "output size that are not factor of input size"
)
k = [int(dim[i] / output_size[i]) for i in range(0, len(dim))]
# call max_poolxd_with_indices to get indices in the output
if type == "MaxPool":
return fn(g, input, k, k, (0,) * len(dim), (1,) * len(dim), False)
output = g.op(type, input, kernel_shape_i=tuple_fn(k), strides_i=tuple_fn(k))
return output
return symbolic_fn
adaptive_avg_pool1d = _adaptive_pool(
"adaptive_avg_pool1d", "AveragePool", torch.nn.modules.utils._single
)
adaptive_avg_pool2d = _adaptive_pool(
"adaptive_avg_pool2d", "AveragePool", torch.nn.modules.utils._pair
)
adaptive_avg_pool3d = _adaptive_pool(
"adaptive_avg_pool3d", "AveragePool", torch.nn.modules.utils._triple
)
adaptive_max_pool1d = _adaptive_pool(
"adaptive_max_pool1d",
"MaxPool",
torch.nn.modules.utils._single,
max_pool1d_with_indices,
)
adaptive_max_pool2d = _adaptive_pool(
"adaptive_max_pool2d",
"MaxPool",
torch.nn.modules.utils._pair,
max_pool2d_with_indices,
)
adaptive_max_pool3d = _adaptive_pool(
"adaptive_max_pool3d",
"MaxPool",
torch.nn.modules.utils._triple,
max_pool3d_with_indices,
)
# Generate paddings in ONNX order based on pad in pytorch.
# Args:
# dim: the dimension of the tensor.
# pad: the paddings in pytorch.
# The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ...
def _prepare_onnx_paddings(dim, pad):
assert isinstance(dim, int)
# The desired order of paddings is
# dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.
# n is the dimension of input.
    # assume zero padding for any leading dimensions that `pad` does not cover
paddings = list(pad[:]) + [0] * (dim * 2 - len(pad))
# reverse order and collate first beginnings and then ends
paddings = paddings[-2::-2] + paddings[-1::-2]
return paddings
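# A worked example (assumed 4-D input): for pad = [1, 2, 3, 4], i.e. W_begin=1, W_end=2,
# H_begin=3, H_end=4 in PyTorch order, _prepare_onnx_paddings(4, pad) returns
# [0, 0, 3, 1, 0, 0, 4, 2], which is the ONNX order
# [N_begin, C_begin, H_begin, W_begin, N_end, C_end, H_end, W_end].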
def _convert_padding_node(padding):
padding = symbolic_helper._maybe_get_const(padding, "is")
if symbolic_helper._is_value(padding) and symbolic_helper._is_packed_list(padding):
input_list = symbolic_helper._unpack_list(padding)
try:
padding = [
symbolic_helper._get_const(v, "i", "padding") for v in input_list
]
except Exception:
# FIXME(justinchuby): Avoid catching Exception.
# Catch a more specific exception instead.
return symbolic_helper._onnx_opset_unsupported_detailed(
"Pad", 9, 11, "The sizes of the padding must be constant"
)
return padding
def constant_pad_nd(g, input, padding, value):
mode = "constant"
try:
value = symbolic_helper._get_const(value, "f", "value")
except Exception:
# FIXME(justinchuby): Avoid catching Exception.
# Catch a more specific exception instead.
return symbolic_helper._onnx_opset_unsupported_detailed(
"Pad", 9, 11, "The value for the padding must be constant"
)
padding = _convert_padding_node(padding)
paddings = _prepare_onnx_paddings(symbolic_helper._get_tensor_rank(input), padding)
return op_with_optional_float_cast(
g, "Pad", input, pads_i=paddings, mode_s=mode, value_f=value, opset_before=11
)
def _pad_circular(g, input, pad):
padding = _convert_padding_node(pad)
assert len(padding) % 2 == 0
ndim = len(padding) // 2
cur = input
for idx in range(ndim):
pad_l = padding[-(2 * idx + 1)]
pad_r = padding[-(2 * idx + 2)]
tensors = []
if pad_l > 0:
left = symbolic_helper._slice_helper(
g, cur, axes=[2 + idx], starts=[-(pad_l + 1)], ends=[-1]
)
tensors.append(left)
if pad_l < 0 or pad_r < 0:
middle = symbolic_helper._slice_helper(
g,
cur,
axes=[2 + idx],
starts=[max(0, -pad_l)],
ends=[-(1 + max(0, -pad_r))],
)
tensors.append(middle)
else:
tensors.append(cur)
if pad_r > 0:
right = symbolic_helper._slice_helper(
g, cur, axes=[2 + idx], starts=[0], ends=[pad_r]
)
tensors.append(right)
cur = g.op("Concat", *tensors, axis_i=(2 + idx))
return cur
def reflection_pad(g, input, padding):
mode = "reflect"
padding = _convert_padding_node(padding)
paddings = _prepare_onnx_paddings(symbolic_helper._get_tensor_rank(input), padding)
return op_with_optional_float_cast(
g, "Pad", input, pads_i=paddings, mode_s=mode, opset_before=11
)
def replication_pad(g, input, padding):
mode = "edge"
padding = _convert_padding_node(padding)
paddings = _prepare_onnx_paddings(symbolic_helper._get_tensor_rank(input), padding)
return op_with_optional_float_cast(
g, "Pad", input, pads_i=paddings, mode_s=mode, opset_before=11
)
reflection_pad1d = reflection_pad
reflection_pad2d = reflection_pad
reflection_pad3d = reflection_pad
replication_pad1d = replication_pad
replication_pad2d = replication_pad
replication_pad3d = replication_pad
def pad(g, input, pad, mode, value):
mode = symbolic_helper._parse_arg(mode, "s")
if mode == "replicate":
return replication_pad(g, input, pad)
elif mode == "reflect":
return reflection_pad(g, input, pad)
elif mode == "constant":
return constant_pad_nd(g, input, pad, value)
elif mode == "circular":
return _pad_circular(g, input, pad)
else:
raise RuntimeError(f"Unrecognized padding mode {mode}")
def _interpolate(name, dim, interpolate_mode):
def symbolic_fn(g, input, output_size, *args):
scales, align_corners = symbolic_helper._get_interpolate_attributes(
g, interpolate_mode, args
)
symbolic_helper._interpolate_warning(interpolate_mode)
align_corners = symbolic_helper._maybe_get_scalar(align_corners)
if align_corners:
return symbolic_helper._unimplemented(name, "align_corners == True")
if scales is None:
scales = symbolic_helper._interpolate_size_to_scales(
g, input, output_size, dim
)
return g.op("Upsample", input, scales, mode_s=interpolate_mode)
return symbolic_fn
upsample_nearest1d = _interpolate("upsample_nearest1d", 3, "nearest")
upsample_nearest2d = _interpolate("upsample_nearest2d", 4, "nearest")
upsample_nearest3d = _interpolate("upsample_nearest3d", 5, "nearest")
upsample_linear1d = _interpolate("upsample_linear1d", 3, "linear")
upsample_bilinear2d = _interpolate("upsample_bilinear2d", 4, "linear")
upsample_trilinear3d = _interpolate("upsample_trilinear3d", 5, "linear")
def __interpolate(
g, input, size, scale_factor, mode, align_corners, recompute_scale_factor, antialias
):
scales, mode = symbolic_helper._interpolate_get_scales_and_mode(
g, input, size, scale_factor, mode, align_corners
)
return g.op("Upsample", input, scales, mode_s=mode)
def bitwise_not(g, inp):
if inp.type().scalarType() != "Bool":
raise NotImplementedError(
"ONNX export does NOT support exporting bitwise Not "
+ "for non-boolean input values"
)
return g.op("Not", inp)
def wrap_logical_op_with_cast_to(to_type):
def decorator(fn):
def wrap_with_cast(g, input, other):
to_cast_func = globals()[f"_cast_{to_type}"]
return fn(g, to_cast_func(g, input, False), to_cast_func(g, other, False))
return wrap_with_cast
return decorator
def wrap_logical_op_with_negation(func):
def wrap_with_not(g, input, other):
return g.op("Not", func(g, input, other))
return wrap_with_not
def __not_(g, self):
if self.type().scalarType() != "Bool":
raise NotImplementedError(
"ONNX export does NOT support exporting bitwise Not "
+ "for non-boolean input values"
)
return g.op("Not", self)
def eq(g, self, other):
if isinstance(self.type(), _C.DeviceObjType) and isinstance(
other.type(), _C.DeviceObjType
):
# ONNX doesn't have devices, so consider them all to be equal.
# The no-op check for equality will get constant-folded.
return g.op("Constant", value_t=torch.tensor(True, dtype=torch.bool))
return g.op("Equal", self, other)
@wrap_logical_op_with_negation
def ne(g, self, other):
return eq(g, self, other)
def gt(g, input, other):
return gt_impl(g, input, other)
def gt_impl(g, input, other):
if (
input.type().scalarType() is not None
and input.type().scalarType() == "Bool"
and other.type().scalarType() is not None
and other.type().scalarType() == "Bool"
):
input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT32)
other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.INT32)
return g.op("Greater", input, other)
def lt(g, input, other):
return lt_impl(g, input, other)
def lt_impl(g, input, other):
if (
input.type().scalarType() is not None
and input.type().scalarType() == "Bool"
and other.type().scalarType() is not None
and other.type().scalarType() == "Bool"
):
input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.INT32)
other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.INT32)
return g.op("Less", input, other)
@wrap_logical_op_with_negation
def ge(g, input, other):
return lt_impl(g, input, other)
@wrap_logical_op_with_negation
def le(g, input, other):
return gt_impl(g, input, other)
def __and_(g, input, other):
if input.type().scalarType() == "Bool" and other.type().scalarType() == "Bool":
return g.op("And", input, other)
else:
raise NotImplementedError(
"ONNX export does NOT support exporting bitwise AND "
+ "for non-boolean input values"
)
def __or_(g, input, other):
if input.type().scalarType() == "Bool" and other.type().scalarType() == "Bool":
return g.op("Or", input, other)
else:
raise NotImplementedError(
"ONNX export does NOT support exporting bitwise OR "
+ "for non-boolean input values"
)
def __xor_(g, input, other):
if input.type().scalarType() == "Bool" and other.type().scalarType() == "Bool":
return g.op("Xor", input, other)
else:
raise NotImplementedError(
"ONNX export does NOT support exporting bitwise XOR "
+ "for non-boolean input values"
)
@wrap_logical_op_with_cast_to("Bool")
def logical_and(g, input, other):
return g.op("And", input, other)
@wrap_logical_op_with_cast_to("Bool")
def logical_or(g, input, other):
return g.op("Or", input, other)
@wrap_logical_op_with_cast_to("Bool")
def logical_xor(g, input, other):
return g.op("Xor", input, other)
def __rshift_(g, self, other):
# make sure to cast other to self's type
# (when self is long, make sure that other is not float)
if other.type().scalarType() != self.type().scalarType():
other = g.op(
"Cast",
other,
to_i=_type_utils.JitScalarType.from_name(
self.type().scalarType()
).onnx_type(),
)
two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32))
# exponent (same type as self) has to be float or double in onnx::Pow
if not symbolic_helper._is_fp(self):
other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT)
two_pow = g.op("Pow", two, other)
two_pow = g.op(
"Cast",
two_pow,
to_i=_type_utils.JitScalarType.from_name(self.type().scalarType()).onnx_type(),
)
rshift = g.op("Div", self, two_pow)
return rshift
def __lshift_(g, self, other):
# make sure to cast other to self's type
# (when self is long, make sure that other is not float)
if other.type().scalarType() != self.type().scalarType():
other = g.op(
"Cast",
other,
to_i=_type_utils.JitScalarType.from_name(
self.type().scalarType()
).onnx_type(),
)
two = g.op("Constant", value_t=torch.tensor(2, dtype=torch.float32))
# exponent (same type as self) has to be float or double in onnx::Pow
if not symbolic_helper._is_fp(self):
other = g.op("Cast", other, to_i=_C_onnx.TensorProtoDataType.FLOAT)
two_pow = g.op("Pow", two, other)
two_pow = g.op(
"Cast",
two_pow,
to_i=_type_utils.JitScalarType.from_name(self.type().scalarType()).onnx_type(),
)
lshift = g.op("Mul", self, two_pow)
return lshift
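# Both shift helpers above lower the shift to a power of two: x << k becomes Mul(x, 2**k) and
# x >> k becomes Div(x, 2**k), after casting 2**k back to x's dtype. For example (assumed
# non-negative integer operands), 5 << 2 -> 5 * 4 = 20 and 20 >> 2 -> 20 / 4 = 5.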
@symbolic_helper.parse_args("v", "v", "v", "i")
def where(g, condition, self=None, other=None, _outputs=None):
# Assumes that torch.where's first argument takes only Bool and Byte tensors.
if condition.type().scalarType() != "Bool":
condition = g.op("Cast", condition, to_i=_C_onnx.TensorProtoDataType.BOOL)
if self is None:
condition = nonzero(g, condition)
return symbolic_helper._unbind_helper(
g, condition, g.op("Constant", value_t=torch.tensor(1)), _outputs
)
return g.op("Where", condition, self, other)
@symbolic_helper.parse_args("v", "i", "none")
def log_softmax(g, input, dim, dtype=None):
# PyTorch dim and ONNX axis have different meanings.
# See Softmax comment for details.
# TODO: remove this as onnx opset 11 spec allows negative axes
input_dim = symbolic_helper._get_tensor_rank(input)
if input_dim is None:
return symbolic_helper._unimplemented(
"dim",
"ONNX and PyTorch use different strategies to split the input. "
"Input rank must be known at export time.",
)
if dim < 0:
dim = input_dim + dim
is_transpose_required = input_dim != dim + 1
# ONNX only supports log_softmax with dim = -1. Transpose must be added before and after log_softmax to support other cases.
if is_transpose_required:
axes = list(range(input_dim))
axes[dim], axes[-1] = axes[-1], axes[dim]
input = g.op("Transpose", input, perm_i=axes)
dim = input_dim - 1
return_op = g.op("LogSoftmax", input, axis_i=dim)
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype")
return_op = g.op(
"Cast", return_op, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type()
)
if is_transpose_required:
return_op = g.op("Transpose", return_op, perm_i=axes)
return return_op
@symbolic_helper.parse_args("v", "i", "i")
def _log_softmax(g, input, dim, half_to_float):
if half_to_float and input.type().scalarType() == "Half":
input = g.op("Cast", input, to_i=_C_onnx.TensorProtoDataType.FLOAT)
return log_softmax(g, input, dim)
@symbolic_helper.parse_args(
"v", "v", "v", "is", "is", "is", "i", "is", "i", "i", "i", "i", "i"
)
def _convolution(
g,
input,
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
benchmark,
deterministic,
cudnn_enabled,
allow_tf32=None,
):
weight_size = symbolic_helper._get_tensor_sizes(weight)
try:
kernel_shape = weight_size[2:]
except Exception:
# FIXME(justinchuby): Avoid catching Exception.
# Catch a more specific exception instead.
kernel_shape = None
if kernel_shape is None or any([i is None for i in kernel_shape]):
raise RuntimeError(
"Unsupported: ONNX export of convolution for kernel " "of unknown shape."
)
args = [input, weight]
# ONNX only supports 1D bias
if (
not symbolic_helper._is_none(bias)
and symbolic_helper._get_tensor_rank(bias) == 1
):
args.append(bias)
kwargs = {
"kernel_shape_i": weight_size[2:],
"strides_i": stride,
# NB: ONNX supports asymmetric padding, whereas PyTorch supports only
# symmetric padding
"pads_i": padding + padding,
"dilations_i": dilation,
"group_i": groups,
}
if any(o != 0 for o in output_padding):
        # ONNX supports both output_shape and output_padding; they are equally expressive.
# output_padding is more straightforward, so we use it here.
# output_shape = stride * (input_shape - 1) + output_padding + kernel_shape - padding * 2
assert transposed
assert len(stride) == len(output_padding)
kwargs["output_padding_i"] = output_padding
n = g.op("ConvTranspose" if transposed else "Conv", *args, **kwargs)
if (
not symbolic_helper._is_none(bias)
and symbolic_helper._get_tensor_rank(bias) != 1
):
return g.op("Add", n, bias)
else:
return n
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is", "i")
def convolution(
g,
input,
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
):
return _convolution(
g,
input,
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
None,
None,
None,
None,
)
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i")
def conv1d(g, input, weight, bias, stride, padding, dilation, groups):
return _convolution(
g,
input,
weight,
bias,
stride,
padding,
dilation,
False,
(),
groups,
None,
None,
None,
None,
)
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i")
def conv2d(g, input, weight, bias, stride, padding, dilation, groups):
return _convolution(
g,
input,
weight,
bias,
stride,
padding,
dilation,
False,
(),
groups,
None,
None,
None,
None,
)
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i")
def conv3d(g, input, weight, bias, stride, padding, dilation, groups):
return _convolution(
g,
input,
weight,
bias,
stride,
padding,
dilation,
False,
(),
groups,
None,
None,
None,
None,
)
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is")
def conv_transpose1d(
g, input, weight, bias, stride, padding, output_padding, groups, dilation
):
return _convolution(
g,
input,
weight,
bias,
stride,
padding,
dilation,
True,
output_padding,
groups,
None,
None,
None,
None,
)
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is")
def conv_transpose2d(
g, input, weight, bias, stride, padding, output_padding, groups, dilation
):
return _convolution(
g,
input,
weight,
bias,
stride,
padding,
dilation,
True,
output_padding,
groups,
None,
None,
None,
None,
)
@symbolic_helper.parse_args("v", "v", "v", "is", "is", "is", "i", "is")
def conv_transpose3d(
g, input, weight, bias, stride, padding, output_padding, groups, dilation
):
return _convolution(
g,
input,
weight,
bias,
stride,
padding,
dilation,
True,
output_padding,
groups,
None,
None,
None,
None,
)
@symbolic_helper.parse_args("v", "v", "v", "v", "v", "i", "f", "f", "i")
def batch_norm(
g,
input,
weight,
bias,
running_mean,
running_var,
training,
momentum,
eps,
cudnn_enabled,
):
symbolic_helper.check_training_mode(training, "batch_norm")
if (
torch.is_autocast_enabled()
and not symbolic_helper.args_have_same_dtype(
[input, weight, bias, running_mean, running_var]
)
and GLOBALS.export_onnx_opset_version < 15
):
return symbolic_helper._onnx_opset_unsupported_detailed(
"BatchNormalization",
9,
15,
"All input tensors must have the same `dtype`."
" Turn off Autocast or export using opset version 15.",
)
weight, bias, running_mean, running_var = symbolic_helper._batchnorm_helper(
g, input, weight, bias, running_mean, running_var
)
out = g.op(
"BatchNormalization",
input,
weight,
bias,
running_mean,
running_var,
epsilon_f=eps,
momentum_f=1 - momentum,
outputs=1 if not training else 5,
)
if not training:
return out
else:
res, new_running_mean, new_running_var, saved_mean, saved_var = out
new_running_mean.setType(running_mean.type())
new_running_var.setType(running_var.type())
saved_mean.setDebugName("batch_norm_dead_output-" + saved_mean.debugName())
saved_var.setDebugName("batch_norm_dead_output-" + saved_var.debugName())
return res
def _layer_norm_returns_normalized_input_mean_rstd(
g,
input: _C.Value,
normalized_shape: Sequence[int],
weight: _C.Value,
bias: _C.Value,
eps: float,
cudnn_enable: bool,
return_mean_rstd: bool,
) -> Tuple[_C.Value, Optional[_C.Value], Optional[_C.Value]]:
if symbolic_helper.is_caffe2_aten_fallback():
return g.at(
"layer_norm",
input,
weight,
bias,
normalized_shape_i=normalized_shape,
eps_f=eps,
cudnn_enable_i=cudnn_enable,
)
axes = [-i for i in range(len(normalized_shape), 0, -1)]
two_cst = symbolic_helper._generate_wrapped_number(g, 2.0)
eps_cst = symbolic_helper._generate_wrapped_number(g, eps)
mean = g.op("ReduceMean", input, axes_i=axes)
numerator = sub(g, input, mean)
# variance = e((x - e(x))^2), and (x - e(x)) is the numerator in the layer_norm formula
variance = g.op("ReduceMean", pow(g, numerator, two_cst), axes_i=axes)
denominator = sqrt(g, add(g, variance, eps_cst))
normalized = g.op("Div", numerator, denominator)
if not (weight is None or symbolic_helper._is_none(weight)):
normalized = mul(g, normalized, weight)
if not (bias is None or symbolic_helper._is_none(bias)):
normalized = add(g, normalized, bias)
if return_mean_rstd:
# rdenominator = 1 / sqrt(variance + eps)
rdenominator = reciprocal(g, denominator)
return normalized, mean, rdenominator
return normalized, None, None
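# The decomposition above follows the standard layer_norm formula:
# y = (x - E[x]) / sqrt(Var[x] + eps) * weight + bias, with E[x] and Var[x] taken over the
# trailing `normalized_shape` axes; the rstd returned for native_layer_norm is 1 / sqrt(Var[x] + eps).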
@symbolic_helper.parse_args("v", "is", "v", "v", "f")
def native_layer_norm(g, input, normalized_shape, weight, bias, eps):
return _layer_norm_returns_normalized_input_mean_rstd(
g, input, normalized_shape, weight, bias, eps, False, True
)
@symbolic_helper.parse_args("v", "is", "v", "v", "f", "i")
def layer_norm(g, input, normalized_shape, weight, bias, eps, cudnn_enable):
normalized, _, _ = _layer_norm_returns_normalized_input_mean_rstd(
g, input, normalized_shape, weight, bias, eps, cudnn_enable, False
)
return normalized
@symbolic_helper.parse_args("v", "v", "v", "v", "v", "i", "f", "f", "i")
def instance_norm(
g,
input,
weight,
bias,
running_mean,
running_var,
use_input_stats,
momentum,
eps,
cudnn_enabled,
):
symbolic_helper.check_training_mode(use_input_stats, "instance_norm")
channel_size = symbolic_helper._get_tensor_dim_size(input, 1)
if weight is None or symbolic_helper._is_none(weight):
if channel_size is None:
raise RuntimeError(
"Unsupported: ONNX export of instance_norm for unknown " "channel size."
)
weight_value = torch.tensor([1.0] * channel_size).type(
"torch." + input.type().scalarType() + "Tensor"
)
weight = g.op("Constant", value_t=weight_value)
if bias is None or symbolic_helper._is_none(bias):
if channel_size is None:
raise RuntimeError(
"Unsupported: ONNX export of instance_norm for unknown " "channel size."
)
bias_value = torch.tensor([0.0] * channel_size).type(
"torch." + input.type().scalarType() + "Tensor"
)
bias = g.op("Constant", value_t=bias_value)
if (
running_mean is None
or symbolic_helper._is_none(running_mean)
or running_var is None
or symbolic_helper._is_none(running_var)
):
return g.op("InstanceNormalization", input, weight, bias, epsilon_f=eps)
else:
input_size = symbolic_helper._get_tensor_sizes(input)
# If input shape is [N, C, H, W], reshape to [1, N * C, H, W] and call batch_norm.
        # For more information on instance_norm(), see:
# https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Normalization.cpp#L542
input_size_reshape = input_size.copy()
n = input_size[0]
if n is None:
raise RuntimeError(
"Unsupported: ONNX export of instance_norm training for unknown "
"batch size."
)
c = input_size[1]
input_size_reshape[0] = 1
input_size_reshape[1] = n * c
weight_ = repeat(
g, weight, g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64))
)
bias_ = repeat(
g, bias, g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64))
)
running_mean_ = repeat(
g,
running_mean,
g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)),
)
running_var_ = repeat(
g,
running_var,
g.op("Constant", value_t=torch.tensor([n], dtype=torch.int64)),
)
input_reshaped = g.op(
"Reshape",
input,
g.op("Constant", value_t=torch.LongTensor(input_size_reshape)),
)
out = batch_norm(
g,
input_reshaped,
weight_,
bias_,
running_mean_,
running_var_,
use_input_stats,
momentum,
eps,
cudnn_enabled,
)
return view(g, out, g.op("Constant", value_t=torch.tensor(input_size)))
@symbolic_helper.parse_args("v", "i", "i", "i")
def unfold(g, input, dimension, size, step):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("unfold", input, dimension_i=dimension, size_i=size, step_i=step)
sizes = symbolic_helper._get_tensor_sizes(input)
try:
sizedim = sizes[dimension]
except Exception:
# FIXME(justinchuby): Avoid catching Exception.
# Catch a more specific exception instead.
sizedim = None
if sizedim is not None:
low_indices = range(0, sizedim, step)
hi_indices = range(size, sizedim + 1, step)
stack = [
symbolic_helper._slice_helper(
g, input, axes=[dimension], starts=[low], ends=[hi]
)
for low, hi in zip(low_indices, hi_indices)
]
ndim = len(sizes)
perm = list(range(0, ndim))
perm.append(perm.pop(dimension))
unsqueeze = [
symbolic_helper._unsqueeze_helper(
g, g.op("Transpose", t, perm_i=perm), [dimension]
)
for t in stack
]
return g.op("Concat", *unsqueeze, axis_i=dimension)
else:
return symbolic_helper._unimplemented("Unfold", "input size not accessible")
@symbolic_helper.parse_args("v", "t", "t", "t")
def elu(g, input, alpha, scale, input_scale):
if scale and scale != 1.0:
return symbolic_helper._unimplemented("scale", "does not support scale in Elu")
if input_scale and input_scale != 1.0:
return symbolic_helper._unimplemented(
"input_scale", "does not support input_scale in Elu"
)
# See Note [Export inplace]
return g.op("Elu", input, alpha_f=symbolic_helper._scalar(alpha))
def selu(g, input):
return g.op("Selu", input)
@symbolic_helper.parse_args("v", "i", "v")
def index_select(g, self, dim, index):
# In case of a scalar index, index_select returns a tensor with the same rank as the input.
# To match this behavior in ONNX, we make index a 1D tensor so that the following gather
# also produces a tensor with the same rank as the input.
return symbolic_helper._select_helper(g, self, dim, index)
def index_put(g, self, indices_list_value, values, accumulate):
if symbolic_helper._is_packed_list(indices_list_value):
indices_list = symbolic_helper._unpack_list(indices_list_value)
else:
indices_list = [indices_list_value]
if symbolic_helper.is_caffe2_aten_fallback():
args = [self] + indices_list + [values, accumulate]
return g.at("index_put", *args)
accumulate = symbolic_helper._parse_arg(accumulate, "b")
if len(indices_list) == 0:
if accumulate:
return add(g, self, values)
else:
return values
else:
symbolic_helper._onnx_opset_unsupported("index_put", 9, 11)
def index_fill(g, self, dim, index, value):
dim_value = symbolic_helper._parse_arg(dim, "i")
if symbolic_helper.is_caffe2_aten_fallback():
return g.at(
"index_fill",
self,
index,
value,
overload_name="int_Scalar",
dim_i=dim_value,
)
expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper(
g, self, dim, index
)
value = symbolic_helper._maybe_get_scalar(value)
value = symbolic_helper._if_scalar_type_as(g, value, self)
expanded_value = expand(g, value, expanded_index_shape, None)
return scatter(g, self, dim, expanded_index, expanded_value)
def index_copy(g, self, dim, index, source):
dim_value = symbolic_helper._parse_arg(dim, "i")
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("index_copy", self, index, source, dim_i=dim_value)
expanded_index_shape, expanded_index = symbolic_helper._index_fill_reshape_helper(
g, self, dim, index
)
return scatter(g, self, dim, expanded_index, source)
@symbolic_helper.parse_args("v", "v", "b", "b")
def bucketize(g, self, boundaries, out_int32=False, right=False):
out_type = _C_onnx.TensorProtoDataType.INT64
if out_int32:
out_type = _C_onnx.TensorProtoDataType.INT32
# A tensor expanded_boundaries is created such that it
# contains a copy of boundaries for each element of self.
new_shape = g.op("Concat", g.op("Shape", boundaries), g.op("Shape", self), axis_i=0)
# Unsqueeze step is performed to respect ONNX's numpy style broadcasting for comparison ops
# https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md
tensor_rank = symbolic_helper._get_tensor_rank(self)
assert tensor_rank is not None
unsqueeze_axes = list(range(1, tensor_rank + 1))
expanded_boundaries = expand(
g,
symbolic_helper._unsqueeze_helper(g, boundaries, unsqueeze_axes),
new_shape,
None,
)
# Compare each element of self to boundaries to get a tensor
# with leading 1s and trailing 0s.
# e.g., 4 > [1, 3, 4] = [1, 1, 0]
# The index of the last 1 is the bucket where the element should go.
if right:
cond = ge(g, self, expanded_boundaries)
else:
cond = gt(g, self, expanded_boundaries)
cond_out = g.op("Cast", cond, to_i=out_type)
# Sum to get the number of 1s corresponding to each element,
# which is the same as the bucket index.
# e.g., sum(4 > [1, 3, 4]) = sum([1, 1, 0]) = 2
return symbolic_helper._reducesum_helper(g, cond_out, axes_i=[0], keepdims_i=0)
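# A short worked example (assumed inputs): bucketize(4, boundaries=[1, 3, 4]) compares 4 against
# every boundary; with right=False the strict comparison gives [1, 1, 0] and the reduced sum is 2,
# while right=True uses the >= comparison ([1, 1, 1]) and yields 3, matching torch.bucketize.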
def type_as(g, self, other):
self_dtype = symbolic_helper._try_get_scalar_type(self)
other_dtype = symbolic_helper._try_get_scalar_type(other)
if self_dtype == other_dtype and self_dtype is not None:
return self
if other_dtype is not None:
return g.op(
"Cast",
self,
to_i=_type_utils.JitScalarType.from_name(other_dtype).onnx_type(),
)
else:
if symbolic_helper.is_caffe2_aten_fallback():
# We don't know the type of other, bail by emitting ATen
return g.at("type_as", self, other)
else:
raise RuntimeError(
"Unsupported: ONNX export of type_as for tensor "
"of unknown dtype. Please check if the dtype of the "
"parameter passed to the type_as function is correct."
)
@symbolic_helper.parse_args("v", "v", "i", "f")
def cosine_similarity(g, x1, x2, dim, eps):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("cosine_similarity", x1, x2, dim_i=dim, eps_f=eps)
cross = symbolic_helper._reducesum_helper(
g, mul(g, x1, x2), axes_i=[dim], keepdims_i=0
)
x1_l2 = symbolic_helper._reducesum_helper(
g, mul(g, x1, x1), axes_i=[dim], keepdims_i=0
)
x2_l2 = symbolic_helper._reducesum_helper(
g, mul(g, x2, x2), axes_i=[dim], keepdims_i=0
)
div_tens = max(
g, sqrt(g, mul(g, x1_l2, x2_l2)), g.op("Constant", value_t=torch.tensor([eps]))
)
return div(g, cross, div_tens)
def pairwise_distance(g, input1, input2, p, eps, keepdim):
if not symbolic_helper._is_value(eps):
eps = g.op("Constant", value_t=torch.tensor([eps]))
inv_p = div(
g,
g.op("Constant", value_t=torch.tensor([1], dtype=torch.float)),
add(g, p, eps),
)
summation = symbolic_helper._reducesum_helper(
g,
pow(g, sub(g, input1, input2), p),
axes_i=[-1],
keepdims_i=symbolic_helper._parse_arg(keepdim, "i"),
)
return pow(g, summation, inv_p)
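# The subgraph above evaluates (sum_i (x1_i - x2_i)**p) ** (1 / (p + eps)) along the last axis,
# with keepdims controlled by `keepdim`; note that no explicit absolute value is emitted, so the
# result matches the p-norm of the difference only when (x1 - x2)**p is non-negative.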
# ignore clone operators that are inserted by PyTorch autograd
def clone(g, input, unused_memory_format):
return input
def abs(g, self):
return g.op("Abs", self)
def log(g, self):
return g.op("Log", self)
def log1p(g, self):
return log(
g, add(g, symbolic_helper._if_scalar_type_as(g, torch.ones(1), self), self)
)
def log10(g, self):
_ln10 = 2.30258509299404568401
return g.op("Div", log(g, self), g.op("Constant", value_t=torch.tensor([_ln10])))
def pow(g, self, exponent):
f_dtype = self_dtype = self.type().scalarType()
if not symbolic_helper._is_fp(self):
f_dtype = "Float"
self = g.op(
"Cast", self, to_i=_type_utils.JitScalarType.from_name(f_dtype).onnx_type()
)
if not symbolic_helper._is_fp(exponent):
exponent = g.op(
"Cast",
exponent,
to_i=_type_utils.JitScalarType.from_name(f_dtype).onnx_type(),
)
pow = g.op("Pow", self, exponent)
return pow
def clamp(g, self, min, max):
    # min or max may be None, in which case we dispatch to
    # clamp_min / clamp_max separately, as ONNX does not have a None syntax
if symbolic_helper._is_none(min):
return clamp_max(g, self, max)
elif symbolic_helper._is_none(max):
return clamp_min(g, self, min)
else:
if symbolic_helper._is_constant(min) and symbolic_helper._is_constant(max):
return op_with_optional_float_cast(
g,
"Clip",
self,
min_f=symbolic_helper._parse_arg(min, "f"),
max_f=symbolic_helper._parse_arg(max, "f"),
opset_before=12,
)
else:
return clamp_max(g, clamp_min(g, self, min), max)
@symbolic_helper.parse_args("v", "v")
def clamp_min(g, self, min):
if symbolic_helper._is_constant(min):
return op_with_optional_float_cast(
g, "Clip", self, min_f=symbolic_helper._parse_arg(min, "f"), opset_before=12
)
else:
dtype = self.type().scalarType()
min = g.op(
"Cast", min, to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type()
)
return op_with_optional_float_cast(g, "Max", self, min, opset_before=12)
@symbolic_helper.parse_args("v", "v")
def clamp_max(g, self, max):
if symbolic_helper._is_constant(max):
return op_with_optional_float_cast(
g, "Clip", self, max_f=symbolic_helper._parse_arg(max, "f"), opset_before=12
)
else:
dtype = self.type().scalarType()
max = g.op(
"Cast", max, to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type()
)
return op_with_optional_float_cast(g, "Min", self, max, opset_before=12)
# torch.max (same for torch.min) actually has two interfaces smashed together:
# torch.max(x, dim, keepdim) and torch.max(x, y)
def max(g, self, dim_or_y=None, keepdim=None):
# torch.max(input)
if dim_or_y is None and keepdim is None:
return g.op("ReduceMax", self, keepdims_i=0)
# torch.max(input, other)
if keepdim is None:
return op_with_optional_float_cast(g, "Max", self, dim_or_y, opset_before=12)
# torch.max(input, dim, keepdim)
else:
dim = symbolic_helper._get_const(dim_or_y, "i", "dim")
keepdim = symbolic_helper._get_const(keepdim, "i", "keepdim")
max = g.op("ReduceMax", self, axes_i=[dim], keepdims_i=keepdim)
indices = g.op("ArgMax", self, axis_i=dim, keepdims_i=keepdim)
return max, indices
def maximum(g, input, other):
return max(g, input, dim_or_y=other)
def min(g, self, dim_or_y=None, keepdim=None):
# torch.min(input)
if dim_or_y is None and keepdim is None:
return g.op("ReduceMin", self, keepdims_i=0)
# torch.min(input, other)
if keepdim is None:
return op_with_optional_float_cast(g, "Min", self, dim_or_y, opset_before=12)
# torch.min(input, dim, keepdim)
else:
dim = symbolic_helper._get_const(dim_or_y, "i", "dim")
keepdim = symbolic_helper._get_const(keepdim, "i", "keepdim")
min = g.op("ReduceMin", self, axes_i=[dim], keepdims_i=keepdim)
indices = g.op("ArgMin", self, axis_i=dim, keepdims_i=keepdim)
return min, indices
def minimum(g, input, other):
return min(g, input, dim_or_y=other)
@symbolic_helper.parse_args("v", "is", "i")
def amax(g, self, dim, keepdim):
return g.op("ReduceMax", self, axes_i=dim, keepdims_i=keepdim)
@symbolic_helper.parse_args("v", "is", "i")
def amin(g, self, dim, keepdim):
return g.op("ReduceMin", self, axes_i=dim, keepdims_i=keepdim)
@symbolic_helper.parse_args("v", "v", "i")
def aminmax(g, self, dim, keepdim):
reduce_kwargs = {"keepdims_i": keepdim}
if not symbolic_helper._is_none(dim):
dim = symbolic_helper._get_const(dim, "i", "dim")
reduce_kwargs["axes_i"] = [dim]
return g.op("ReduceMin", self, **reduce_kwargs), g.op(
"ReduceMax", self, **reduce_kwargs
)
def exp(g, self):
return g.op("Exp", self)
@symbolic_helper.parse_args("v", "f", "i")
def dropout(g, input, p, train):
symbolic_helper.check_training_mode(train, "dropout")
# if train is False, dropout is no-op
if not train:
return input
r, _ = g.op("Dropout", input, ratio_f=p, outputs=2)
return r
def _unsupported_dropout(name):
@symbolic_helper.parse_args("v", "f", "i")
def feature_dropout(g, input, p, train):
# NB: In inference mode, FeatureDropout is exported as an identity op.
if train:
return symbolic_helper._unimplemented(name, "training mode")
return input
return feature_dropout
feature_dropout = _unsupported_dropout("feature_dropout")
alpha_dropout = _unsupported_dropout("alpha_dropout")
feature_alpha_dropout = _unsupported_dropout("feature_alpha_dropout")
# See Note [Export inplace]
dropout_ = dropout
feature_dropout_ = feature_dropout
alpha_dropout_ = alpha_dropout
feature_alpha_dropout_ = feature_alpha_dropout
@symbolic_helper.parse_args("v", "t", "is", "i")
def norm(g, self, p, dim, keepdim):
if p == 1:
f = _reduce_op_symbolic("ReduceL1")
elif p == 2:
f = _reduce_op_symbolic("ReduceL2")
else:
raise RuntimeError("ONNX export only p-norms with p of 1 or 2")
return f(g, self, dim=dim, keepdim=keepdim)
@symbolic_helper.parse_args("v", "v", "v", "i")
def conv_tbc(g, input, weight, bias, pad):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("conv_tbc", input, weight, bias, pad_i=pad)
else:
# input must have 3 dimensions, see:
# https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/ConvolutionTBC.cpp#L8-L10
# input = (time, batch, in_channels)
# weight = (kernel_width, in_channels, out_channels)
# bias = (out_channels,)
input = g.op("Transpose", input, perm_i=[1, 2, 0])
weight = g.op("Transpose", weight, perm_i=[2, 1, 0])
conv = conv1d(g, input, weight, bias, [1], [pad], [1], 1)
return g.op("Transpose", conv, perm_i=[2, 0, 1])
@symbolic_helper.parse_args("v", "i", "i")
def _unique(g, input, sorted, return_inverse):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at(
"_unique",
input,
sorted_i=sorted,
return_inverse_i=return_inverse,
outputs=2,
)
else:
return symbolic_helper._onnx_unsupported("_unique")
@symbolic_helper.parse_args("v", "i", "i", "i")
def _unique2(g, input, sorted, return_inverse, return_counts):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at(
"_unique2",
input,
sorted_i=sorted,
return_inverse_i=return_inverse,
return_counts_i=return_counts,
outputs=3,
)
else:
symbolic_helper._onnx_opset_unsupported("_unique2", 9, 11)
def _cast_func_template(to_i, g, input, non_blocking):
"""Template for creating a cast function."""
return g.op("Cast", input, to_i=to_i)
# Metaprogram symbolics for each ATen native specialized cast operator.
# For example, we specify a function named `_cast_Byte` that instantiates an
# ONNX Cast node with the `to` attribute set to "UINT8".
# def _cast_Byte
# def _cast_Char
# def _cast_Short
# def _cast_Int
# def _cast_Long
# def _cast_Half
# def _cast_Float
# def _cast_Double
# def _cast_ComplexFloat
# def _cast_ComplexDouble
# def _cast_Bool
# def _cast_BFloat16
for scalar_type in (
"Byte",
"Char",
"Short",
"Int",
"Long",
"Half",
"Float",
"Double",
"ComplexFloat",
"ComplexDouble",
"Bool",
"BFloat16",
):
func_name = f"_cast_{scalar_type}"
globals()[func_name] = symbolic_helper.parse_args("v", "i")(
functools.partial(
_cast_func_template,
_type_utils.JitScalarType.from_name(scalar_type).onnx_type(),
)
)
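# Illustrative sketch: the loop above generates module-level symbolics such as
# `_cast_Float`, which (assuming a graph value `x`) lowers, conceptually, to a
# single node:
#   _cast_Float(g, x, False)  ->  Cast(x, to=FLOAT)
# The non_blocking argument is accepted for signature compatibility and ignored.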
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
def empty(g, sizes, dtype, layout, device, pin_memory=False, memory_format=None):
return zeros(g, sizes, dtype, layout, device, pin_memory)
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
def empty_like(
g, input, dtype=None, layout=None, device=None, pin_memory=False, memory_format=None
):
return zeros_like(g, input, dtype, layout, device, pin_memory)
def new_empty(g, self, sizes, dtype, layout, device, pin_memory=False):
self_dtype = symbolic_helper._try_get_scalar_type(self)
if dtype is None and self_dtype is not None:
dtype = self_dtype
dtype = _type_utils.JitScalarType.from_name(dtype)
return empty(g, sizes, dtype, layout, device, pin_memory)
def scalar_tensor(g, scalar, dtype, *options):
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
if dtype is None:
dtype = _type_utils.JitScalarType.FLOAT
scalar = g.op("Cast", scalar, to_i=_type_utils.JitScalarType(dtype).onnx_type())
return scalar
def tensor(g, data, dtype=None, device=None, requires_grad=False):
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
if symbolic_helper._is_packed_list(data):
if dtype is None:
scalar_name = symbolic_helper._unpack_list(data)[0].type().scalarType() # type: ignore[attr-defined]
# TODO(justinchuby): Remove type ignore after #81112 is checked in.
dtype = _type_utils.JitScalarType.from_name(scalar_name)
input_list = list()
for t in symbolic_helper._unpack_list(data):
shape_reference = g.op("Constant", value_t=torch.LongTensor([1]))
t = symbolic_helper._reshape_helper(g, t, shape_reference)
t = g.op("Cast", t, to_i=_type_utils.JitScalarType(dtype).onnx_type())
input_list.append(t)
return g.op("Concat", *input_list, axis_i=0)
else:
if dtype is None:
scalar_name = data.type().scalarType()
dtype = _type_utils.JitScalarType.from_name(scalar_name)
if symbolic_helper._is_list(data) and (
symbolic_helper._is_tensor_list(data)
or symbolic_helper._is_scalar_list(data)
):
data = g.op("ConcatFromSequence", data, axis_i=0, new_axis_i=1)
return g.op("Cast", data, to_i=_type_utils.JitScalarType(dtype).onnx_type())
def as_tensor(g, data, dtype=None, device=None):
return tensor(g, data, dtype, device)
@symbolic_helper.parse_args("v", "i", "v", "v", "v")
def zeros(g, sizes, dtype, layout, device, pin_memory=False):
    # NOTE: there is no way to set device, layout, or pin_memory in ONNX, so we ignore them
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
sizes_ = symbolic_helper._maybe_get_const(sizes, "is")
if isinstance(sizes_, list) and len(sizes_) == 0:
sizes = g.op("Constant", value_t=torch.tensor([]).to(torch.int64))
return g.op(
"ConstantOfShape",
sizes,
value_t=torch.tensor([0], dtype=scalar_type.dtype()),
)
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
def zeros_like(
g, input, dtype=None, layout=None, device=None, pin_memory=False, memory_format=None
):
shape = g.op("Shape", input)
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
return g.op(
"ConstantOfShape",
shape,
value_t=torch.tensor([0], dtype=scalar_type.dtype()),
)
def new_zeros(g, self, sizes, dtype, layout, device, pin_memory=False):
self_dtype = symbolic_helper._try_get_scalar_type(self)
if dtype is None and self_dtype is not None:
dtype = _type_utils.JitScalarType.from_name(self_dtype)
return zeros(g, sizes, dtype, layout, device, pin_memory)
@symbolic_helper.parse_args("v", "i", "v", "v", "v")
def ones(g, sizes, dtype, layout, device, pin_memory=False):
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
sizes_ = symbolic_helper._maybe_get_const(sizes, "is")
if isinstance(sizes_, list) and len(sizes_) == 0:
sizes = g.op("Constant", value_t=torch.tensor([]).to(torch.int64))
return g.op(
"ConstantOfShape",
sizes,
value_t=torch.tensor([1], dtype=scalar_type.dtype()),
)
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
def ones_like(
g, input, dtype=None, layout=None, device=None, pin_memory=False, memory_format=None
):
shape = g.op("Shape", input)
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
return g.op(
"ConstantOfShape",
shape,
value_t=torch.tensor([1], dtype=scalar_type.dtype()),
)
def new_ones(g, self, sizes, dtype, layout, device, pin_memory=False):
self_dtype = symbolic_helper._try_get_scalar_type(self)
if dtype is None and self_dtype is not None:
dtype = self_dtype
dtype = _type_utils.JitScalarType.from_name(dtype)
return ones(g, sizes, dtype, layout, device, pin_memory)
def full(g, sizes, value, dtype, layout, device, pin_memory=False):
const_value = symbolic_helper._maybe_get_const(value, "t")
if symbolic_helper._is_value(const_value):
dtype = _type_utils.JitScalarType.FLOAT if dtype is None else dtype
tmp = zeros(g, sizes, dtype, layout, device)
return add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1)))
else:
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
sizes_ = symbolic_helper._maybe_get_const(sizes, "is")
if isinstance(sizes_, list) and len(sizes_) == 0:
sizes = g.op("Constant", value_t=torch.tensor([]).to(torch.int64))
return g.op(
"ConstantOfShape",
sizes,
value_t=const_value.view(1).to(scalar_type.dtype()),
)
def full_like(
g,
input,
fill_value,
dtype=None,
layout=None,
device=None,
pin_memory=False,
memory_format=None,
):
fill_value = symbolic_helper._maybe_get_const(fill_value, "f")
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
if symbolic_helper._is_value(fill_value):
tmp = zeros_like(g, input, dtype, layout, device)
fill_value = g.op("Cast", fill_value, to_i=scalar_type.onnx_type())
return add(g, tmp, fill_value, g.op("Constant", value_t=torch.tensor(1)))
else:
shape = g.op("Shape", input)
return g.op(
"ConstantOfShape",
shape,
value_t=torch.tensor([fill_value], dtype=scalar_type.dtype()),
)
def new_full(g, self, size, fill_value, dtype, layout, device, pin_memory=False):
self_dtype = symbolic_helper._try_get_scalar_type(self)
if dtype is None and self_dtype is not None:
dtype = self_dtype
dtype = _type_utils.JitScalarType.from_name(dtype)
return full(g, size, fill_value, dtype, layout, device, pin_memory)
def eye(g, *args):
if len(args) == 5:
# aten::eye(n, dtype, layout, device, pin_memory)
n, dtype, layout, device, pin_memory = args
dim_size = symbolic_helper._unsqueeze_helper(g, n, [0])
shape = g.op("Concat", dim_size, dim_size, axis_i=0)
tensor = zeros(g, shape, dtype, layout, device)
return g.op("EyeLike", tensor)
elif len(args) == 6:
# aten::eye(n, m, dtype, layout, device, pin_memory)
n, m, dtype, layout, device, pin_memory = args
shape = g.op(
"Concat",
symbolic_helper._unsqueeze_helper(g, n, [0]),
symbolic_helper._unsqueeze_helper(g, m, [0]),
axis_i=0,
)
tensor = zeros(g, shape, dtype, layout, device)
return g.op("EyeLike", tensor)
else:
raise NotImplementedError("Unknown aten::eye signature")
def slice(g, self, *args):
if len(args) == 4:
# aten::slice(Tensor self, int dim, int start, int end, int step) -> Tensor
dim, start, end, step = args
step = symbolic_helper._parse_arg(step, "i")
if step != 1:
raise RuntimeError("step!=1 is currently not supported")
is_start_none = start.node().kind() == "prim::Constant" and isinstance(
start.type(), _C.NoneType
)
is_end_none = end.node().kind() == "prim::Constant" and isinstance(
end.type(), _C.NoneType
)
is_start_onnx_const = start.node().kind() == "onnx::Constant"
is_end_onnx_const = end.node().kind() == "onnx::Constant"
if (
((not is_start_none) and (not is_start_onnx_const))
or ((not is_end_none) and (not is_end_onnx_const))
or dim.node().kind() != "onnx::Constant"
):
if GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX:
raise RuntimeError(
"Unsupported: ONNX export of Slice with dynamic inputs. DynamicSlice "
"is a deprecated experimental op. Please use statically allocated "
"variables or export to a higher opset version."
)
else:
start_unsqueezed = symbolic_helper._unsqueeze_helper(g, start, [0])
end_unsqueezed = symbolic_helper._unsqueeze_helper(g, end, [0])
dim_unsqueezed = symbolic_helper._unsqueeze_helper(g, dim, [0])
return g.op(
"DynamicSlice",
self,
start_unsqueezed,
end_unsqueezed,
dim_unsqueezed,
)
else:
start = 0 if is_start_none else symbolic_helper._parse_arg(start, "i")
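                # 9223372036854775807 is INT64_MAX, used here as a sentinel meaning
                # "slice to the end" when `end` is None.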
end = (
9223372036854775807
if is_end_none
else symbolic_helper._parse_arg(end, "i")
)
dim = symbolic_helper._parse_arg(dim, "i")
return symbolic_helper._slice_helper(
g, self, axes=[dim], starts=[start], ends=[end]
)
elif len(args) == 3:
# aten::slice(t[] l, int start, int end, int step) -> t[]
start, end, step = args
dim = 0
is_start_none = start.node().kind() == "prim::Constant" and isinstance(
start.type(), _C.NoneType
)
is_end_none = end.node().kind() == "prim::Constant" and isinstance(
end.type(), _C.NoneType
)
start = 0 if is_start_none else symbolic_helper._parse_arg(start, "i")
end = (
9223372036854775807 if is_end_none else symbolic_helper._parse_arg(end, "i")
)
return symbolic_helper._slice_helper(
g, self, axes=[dim], starts=[start], ends=[end]
)
else:
raise NotImplementedError("Unknown aten::slice signature")
@symbolic_helper.parse_args("v", "f", "f")
def hardtanh(g, self, min_val, max_val):
return op_with_optional_float_cast(
g, "Clip", self, min_f=min_val, max_f=max_val, opset_before=12
)
@symbolic_helper.parse_args("v")
def hardswish(g, self):
hs = hardsigmoid(g, self)
return g.op("Mul", self, hs)
# Fixed scale and zero_point, discovered from aten/src/ATen/native/quantized/cpu/qhardsigmoid.cpp
@symbolic_helper.quantized_args(True, scale=1.0 / 256.0, zero_point=0)
@symbolic_helper.parse_args("v")
def hardsigmoid(g, self):
# Set alpha_f to 1 / 6 to make op equivalent to PyTorch's definition of Hardsigmoid.
# See https://pytorch.org/docs/stable/generated/torch.nn.Hardsigmoid.html
return g.op("HardSigmoid", self, alpha_f=1 / 6)
@symbolic_helper.parse_args("v")
def tanhshrink(g, self):
return g.op("Sub", self, tanh(g, self))
@symbolic_helper.parse_args("v", "f")
def hardshrink(g, self, lambd):
dtype = self.type().scalarType()
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType.from_name(dtype)
lambd_op = g.op(
"Constant",
value_t=torch.tensor(lambd, dtype=scalar_type.dtype()),
)
cond = logical_or(g, gt(g, self, lambd_op), lt(g, self, neg(g, lambd_op)))
return g.op(
"Where",
cond,
self,
g.op(
"Constant",
value_t=torch.tensor(0, dtype=scalar_type.dtype()),
),
)
@symbolic_helper.parse_args("v", "f")
def softshrink(g, self, lambd):
dtype = self.type().scalarType()
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType.from_name(dtype)
lambd_op = g.op(
"Constant",
value_t=torch.tensor(lambd, dtype=scalar_type.dtype()),
)
gt_cond = gt(g, self, lambd_op)
gt_out = g.op(
"Where",
gt_cond,
sub(g, self, lambd_op),
g.op(
"Constant",
value_t=torch.tensor(0, dtype=scalar_type.dtype()),
),
)
lt_cond = lt(g, self, neg(g, lambd_op))
lt_out = g.op(
"Where",
lt_cond,
add(g, self, lambd_op),
g.op(
"Constant",
value_t=torch.tensor(0, dtype=scalar_type.dtype()),
),
)
return add(g, gt_out, lt_out)
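# Illustrative sketch of the shrink formulas implemented above with Where nodes:
#   hardshrink(x, lambd) = x          if |x| > lambd, else 0
#   softshrink(x, lambd) = x - lambd  if x >  lambd
#                          x + lambd  if x < -lambd
#                          0          otherwise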
def alias(g, self):
return self
@symbolic_helper.parse_args("v", "i")
def unsqueeze(g, self, dim):
# Handle negative dim
if dim < 0:
rank = symbolic_helper._get_tensor_rank(self)
if rank is not None:
warnings.warn(
"ONNX export unsqueeze with negative axis "
+ str(dim)
+ " might cause the onnx model to be incorrect. "
+ "Negative axis is not supported in ONNX. "
+ "Axis is converted to "
+ str(dim + rank + 1)
+ " based on input shape at export time. "
+ "Passing an tensor of different rank in execution will be incorrect."
)
dim = dim + rank + 1
else:
return symbolic_helper._unimplemented(
"unsqueeze", "negative axis with unknown input rank"
)
return symbolic_helper._unsqueeze_helper(g, self, axes_i=[dim])
@symbolic_helper.parse_args("v", "i", "i", "none")
def sort(g, self, dim, descending, out=None):
if out is not None:
symbolic_helper._unimplemented(
"Sort", "Out parameter is not supported for sort"
)
self_sizes = symbolic_helper._get_tensor_sizes(self)
try:
dim_size = self_sizes[dim]
except Exception:
# FIXME(justinchuby): Avoid catching Exception.
# Catch a more specific exception instead.
dim_size = None
if dim_size is None:
return symbolic_helper._unimplemented("Sort", "input size not accessible")
return g.op("TopK", self, k_i=dim_size, axis_i=dim, outputs=2)
def numel(g, self):
shape = g.op("Shape", self)
return g.op("ReduceProd", shape, keepdims_i=0)
@symbolic_helper.parse_args("v", "i", "i", "i", "i", "none")
def topk(g, self, k, dim, largest, sorted, out=None):
if out is not None:
symbolic_helper._unimplemented(
"TopK", "Out parameter is not supported for topk"
)
if not largest:
symbolic_helper._unimplemented("TopK", "Ascending TopK is not supported")
return g.op("TopK", self, k_i=k, axis_i=dim, outputs=2)
def to(g, self, *args):
def is_aten_to_device_only(args):
if len(args) == 4:
# aten::to(Tensor, Device, bool, bool, memory_format)
return (
args[0].node().kind() == "prim::device"
or args[0].type().isSubtypeOf(_C.ListType.ofInts())
or isinstance(args[0].type(), _C.DeviceObjType)
)
elif len(args) == 5:
# aten::to(Tensor, Device, ScalarType, bool, bool, memory_format)
            # When dtype is None, this is an aten::to(device) call
dtype = symbolic_helper._get_const(args[1], "i", "dtype")
return dtype is None
elif len(args) in (6, 7):
# aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) -> Tensor
# aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) -> Tensor
            # When dtype is None, this is an aten::to(device) call
dtype = symbolic_helper._get_const(args[0], "i", "dtype")
return dtype is None
return False
# ONNX doesn't have a concept of a device, so we ignore device-only casts
if is_aten_to_device_only(args):
return self
if len(args) == 4:
# TestONNXRuntime::test_ones_bool shows args[0] of aten::to() can be onnx::Constant[value=<Tensor>]()
# In this case, the constant value is a tensor not int,
# so symbolic_helper._maybe_get_const(args[0], 'i') would not work.
dtype = args[0]
if (
symbolic_helper._is_value(args[0])
and args[0].node().kind() == "onnx::Constant"
):
tval = symbolic_helper._node_get(args[0].node(), "value")
if isinstance(tval, torch.Tensor):
if len(tval.shape) == 0:
tval = tval.item()
dtype = int(tval)
else:
dtype = tval
if symbolic_helper._is_value(dtype) or isinstance(dtype, torch.Tensor):
# aten::to(Tensor, Tensor, bool, bool, memory_format)
dtype = args[0].type().scalarType()
return g.op(
"Cast",
self,
to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type(),
)
else:
# aten::to(Tensor, ScalarType, bool, bool, memory_format)
# memory_format is ignored
return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type())
elif len(args) == 5:
# aten::to(Tensor, Device, ScalarType, bool, bool, memory_format)
dtype = symbolic_helper._get_const(args[1], "i", "dtype")
# memory_format is ignored
return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type())
elif len(args) == 6:
# aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) -> Tensor
dtype = symbolic_helper._get_const(args[0], "i", "dtype")
# Layout, device and memory_format are ignored
return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type())
elif len(args) == 7:
# aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) -> Tensor
dtype = symbolic_helper._get_const(args[0], "i", "dtype")
# Layout, device and memory_format are ignored
return g.op("Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type())
else:
return symbolic_helper._onnx_unsupported("Unknown aten::to signature")
def repeat(g, self, repeats):
dtype = _type_utils.JitScalarType.INT64
shape_ = ones_like(g, repeats, dtype)
self = g.op("Expand", self, shape_)
return g.op("Tile", self, repeats)
def repeat_interleave(g, self, repeats, dim=None, output_size=None):
input = self
# if dim is None flatten
# By default, use the flattened input array, and return a flat output array
if symbolic_helper._is_none(dim):
input = symbolic_helper._reshape_helper(
g, self, g.op("Constant", value_t=torch.tensor([-1]))
)
dim = 0
else:
dim = symbolic_helper._maybe_get_scalar(dim)
repeats_dim = symbolic_helper._get_tensor_rank(repeats)
repeats_sizes = symbolic_helper._get_tensor_sizes(repeats)
input_sizes = symbolic_helper._get_tensor_sizes(input)
if repeats_dim is None:
raise RuntimeError(
"Unsupported: ONNX export of repeat_interleave for unknown repeats rank."
)
if repeats_sizes is None:
raise RuntimeError(
"Unsupported: ONNX export of repeat_interleave for unknown repeats size."
)
if input_sizes is None:
raise RuntimeError(
"Unsupported: ONNX export of repeat_interleave for unknown input size."
)
input_sizes_temp = input_sizes.copy()
for idx, input_size in enumerate(input_sizes):
if input_size is None:
input_sizes[idx], input_sizes_temp[idx] = 0, -1
# Cases where repeats is an int or single value tensor
if repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1):
if not symbolic_helper._is_tensor(repeats):
repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
if input_sizes[dim] == 0:
return symbolic_helper._onnx_opset_unsupported_detailed(
"repeat_interleave",
9,
13,
"Unsupported along dimension with unknown input size",
)
else:
reps = input_sizes[dim]
repeats = expand(
g, repeats, g.op("Constant", value_t=torch.tensor([reps])), None
)
# Cases where repeats is a 1 dim Tensor
elif repeats_dim == 1:
if input_sizes[dim] == 0:
return symbolic_helper._onnx_opset_unsupported_detailed(
"repeat_interleave",
9,
13,
"Unsupported along dimension with unknown input size",
)
if repeats_sizes[0] is None:
return symbolic_helper._onnx_opset_unsupported_detailed(
"repeat_interleave", 9, 13, "Unsupported for cases with dynamic repeats"
)
assert (
repeats_sizes[0] == input_sizes[dim]
), "repeats must have the same size as input along dim"
reps = repeats_sizes[0]
else:
raise RuntimeError("repeats must be 0-dim or 1-dim tensor")
final_splits = list()
r_splits = symbolic_helper._repeat_interleave_split_helper(g, repeats, reps, 0)
i_splits = symbolic_helper._repeat_interleave_split_helper(g, input, reps, dim)
input_sizes[dim], input_sizes_temp[dim] = -1, 1
for idx, r_split in enumerate(r_splits):
i_split = unsqueeze(g, i_splits[idx], dim + 1)
r_concat = [
g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[: dim + 1])),
r_split,
g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[dim + 1 :])),
]
r_concat = g.op("Concat", *r_concat, axis_i=0)
i_split = expand(g, i_split, r_concat, None)
i_split = symbolic_helper._reshape_helper(
g,
i_split,
g.op("Constant", value_t=torch.LongTensor(input_sizes)),
allowzero=0,
)
final_splits.append(i_split)
return g.op("Concat", *final_splits, axis_i=dim)
@symbolic_helper.parse_args("v", "i")
def pixel_shuffle(g, self, upscale_factor):
dims = symbolic_helper._get_tensor_sizes(self)
if len(dims) != 4:
return symbolic_helper._unimplemented("pixel_shuffle", "only support 4d input")
if any(i is None for i in dims[1:]):
after_view = symbolic_helper._reshape_helper(
g,
symbolic_helper._unsqueeze_helper(g, self, [2, 3]),
g.op(
"Constant",
value_t=torch.tensor([0, -1, upscale_factor, upscale_factor, 0, 0]),
),
allowzero=0,
)
after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 4, 2, 5, 3])
# For dynamic input shapes, two reshapes are performed
reshape_h = symbolic_helper._reshape_helper(
g,
after_transpose,
g.op("Constant", value_t=torch.tensor([0, 0, -1, 1, 0, 0])),
allowzero=0,
)
reshape_w = symbolic_helper._reshape_helper(
g,
reshape_h,
g.op("Constant", value_t=torch.tensor([0, 0, 0, 0, -1, 1])),
allowzero=0,
)
return symbolic_helper._squeeze_helper(g, reshape_w, [3, 5])
else:
output_channel = dims[1] // upscale_factor // upscale_factor
after_view = symbolic_helper._reshape_helper(
g,
self,
g.op(
"Constant",
value_t=torch.tensor(
[
-1,
output_channel,
upscale_factor,
upscale_factor,
dims[2],
dims[3],
]
),
),
allowzero=0,
)
after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 4, 2, 5, 3])
return symbolic_helper._reshape_helper(
g,
after_transpose,
g.op(
"Constant",
value_t=torch.tensor(
[
-1,
output_channel,
dims[2] * upscale_factor,
dims[3] * upscale_factor,
]
),
),
allowzero=0,
)
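# Illustrative sketch of the shape transformation above, assuming a static 4-D
# input: (N, C * r * r, H, W) is viewed as (N, C, r, r, H, W), transposed to
# (N, C, H, r, W, r) and reshaped to (N, C, H * r, W * r), which is what
# torch.nn.functional.pixel_shuffle with upscale_factor r produces.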
@symbolic_helper.parse_args("v", "i")
def pixel_unshuffle(g, self, downscale_factor):
dims = symbolic_helper._get_tensor_sizes(self)
if len(dims) != 4:
return symbolic_helper._unimplemented("pixel_shuffle", "only support 4d input")
if any(i is None for i in dims[1:]):
# For dynamic input shapes, two reshapes are performed
reshape_h = symbolic_helper._reshape_helper(
g,
symbolic_helper._unsqueeze_helper(g, self, [3]),
g.op("Constant", value_t=torch.tensor([0, 0, -1, downscale_factor, 0])),
allowzero=0,
)
reshape_w = symbolic_helper._reshape_helper(
g,
reshape_h,
g.op("Constant", value_t=torch.tensor([0, 0, 0, 0, -1, downscale_factor])),
allowzero=0,
)
after_transpose = g.op("Transpose", reshape_w, perm_i=[0, 1, 3, 5, 2, 4])
final_reshape = symbolic_helper._reshape_helper(
g,
after_transpose,
g.op("Constant", value_t=torch.tensor([0, -1, 1, 1, 0, 0])),
allowzero=0,
)
return symbolic_helper._squeeze_helper(g, final_reshape, [2, 3])
else:
output_channel = dims[1] * downscale_factor * downscale_factor
after_view = symbolic_helper._reshape_helper(
g,
self,
g.op(
"Constant",
value_t=torch.tensor(
[
-1,
dims[1],
dims[2] // downscale_factor,
downscale_factor,
dims[3] // downscale_factor,
downscale_factor,
]
),
),
allowzero=0,
)
after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 3, 5, 2, 4])
return symbolic_helper._reshape_helper(
g,
after_transpose,
g.op(
"Constant",
value_t=torch.tensor(
[
-1,
output_channel,
dims[2] // downscale_factor,
dims[3] // downscale_factor,
]
),
),
allowzero=0,
)
def _generic_rnn(
g,
variant,
input,
initial_states,
all_weights,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first=None,
batch_sizes=None,
):
warnings.warn(
"Exporting a model to ONNX with a batch_size other than 1, "
+ "with a variable length with "
+ variant
+ " can cause an error "
+ "when running the ONNX model with a different batch size. "
+ "Make sure to save the model with a batch size of 1, "
+ "or define the initial states (h0/c0) as inputs of the model. "
)
onnxActivations = [
"Relu",
"Tanh",
"Sigmoid",
"Affine",
"LeakyRelu",
"ThresholdedRelu",
"ScaledTanh",
"HardSigmoid",
"Elu",
"Softsign",
"Softplus",
]
variantToOnnxActivationMap = dict(
zip([act_fun.lower() for act_fun in onnxActivations], onnxActivations)
)
weights_per_layer = 4 if has_biases else 2
    # A mismatched weight count means projections are used inside the LSTM, so tell the user that this is not supported
if variant == "LSTM" and len(all_weights) != num_layers * weights_per_layer * (
1 + bidirectional
):
return symbolic_helper._unimplemented("LSTM", "LSTMs with projections")
assert len(all_weights) == num_layers * weights_per_layer * (1 + bidirectional)
layer_weights = [
all_weights[i : i + weights_per_layer]
for i in range(0, len(all_weights), weights_per_layer)
]
if batch_first:
# batch, seq, feat -> seq, batch, feat
input = g.op("Transpose", input, perm_i=[1, 0, 2])
if dropout and train:
return symbolic_helper._unimplemented(
"RNN/GRU/LSTM", "dropout in training mode"
)
if variant.startswith("RNN"):
nonlinearity = variantToOnnxActivationMap[variant[4:].lower()]
variant = "RNN"
w_hh = all_weights[1]
hidden_size = symbolic_helper._get_tensor_dim_size(w_hh, 1)
if hidden_size is None:
return symbolic_helper._unimplemented("RNN/GRU/LSTM", "unknown hidden size")
unidirectional = not bidirectional
prev_output = input
h_outs = []
if variant == "RNN" or variant == "GRU":
h0 = initial_states
elif variant == "LSTM":
h0, c0 = initial_states
c_outs = []
sequence_lens = unused(g) if batch_sizes is None else batch_sizes
if variant == "GRU":
# pytorch is reset, input, hidden
# onnx is input, reset, hidden
reform_permutation = [(1, 2), (0, 1), (2, 3)]
elif variant == "LSTM":
# pytorch is input, forget, cell, output.
# onnx is input, output, forget, cell.
reform_permutation = [(0, 1), (3, 4), (1, 3)]
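    # Illustrative sketch: for an LSTM with hidden size n, PyTorch packs the
    # per-gate weights as [input, forget, cell, output] along dim 0 (4n rows),
    # while ONNX expects [input, output, forget, cell]. reform_weights below
    # slices rows [0, n), [3n, 4n), [n, 3n) and concatenates them to perform
    # that reordering; the GRU permutation swaps the reset and input blocks.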
def reform_weights(g, w, n, intervals):
slices = [
symbolic_helper._slice_helper(g, w, axes=[0], starts=[x * n], ends=[y * n])
for x, y in intervals
]
return g.op("Concat", *slices, axis_i=0)
def transform_weights_no_bias(layer_index):
weights = layer_weights[layer_index]
if variant == "RNN":
weight_ih, weight_hh = weights
elif variant == "GRU" or variant == "LSTM":
weight_ih, weight_hh = (
reform_weights(g, w, hidden_size, reform_permutation) for w in weights
)
return tuple(
symbolic_helper._unsqueeze_helper(g, x, [0]) for x in (weight_ih, weight_hh)
)
def transform_weights(layer_index):
weights = layer_weights[layer_index]
if variant == "RNN":
weight_ih, weight_hh, bias_ih, bias_hh = weights
elif variant == "GRU" or variant == "LSTM":
weight_ih, weight_hh, bias_ih, bias_hh = (
reform_weights(g, w, hidden_size, reform_permutation) for w in weights
)
bias_concat = g.op("Concat", bias_ih, bias_hh, axis_i=0)
return tuple(
symbolic_helper._unsqueeze_helper(g, x, [0])
for x in (weight_ih, weight_hh, bias_concat)
)
def retrieve_state(x, start, end):
return (
x
if num_layers == 1
else symbolic_helper._slice_helper(
g, x, axes=[0], starts=[start], ends=[end]
)
)
for i in range(num_layers):
if unidirectional:
if weights_per_layer == 4:
weight_ih, weight_hh, bias_concat = transform_weights(i)
else:
weight_ih, weight_hh = transform_weights_no_bias(i)
bias_concat = unused(g)
state_indices = i, i + 1
else:
if weights_per_layer == 4:
weight_ih_f, weight_hh_f, bias_f = transform_weights(2 * i)
weight_ih_b, weight_hh_b, bias_b = transform_weights(2 * i + 1)
bias_concat = g.op("Concat", bias_f, bias_b, axis_i=0)
else:
weight_ih_f, weight_hh_f = transform_weights_no_bias(2 * i)
weight_ih_b, weight_hh_b = transform_weights_no_bias(2 * i + 1)
bias_concat = unused(g)
weight_ih = g.op("Concat", weight_ih_f, weight_ih_b, axis_i=0)
weight_hh = g.op("Concat", weight_hh_f, weight_hh_b, axis_i=0)
state_indices = 2 * i, 2 * i + 2
inputs = [prev_output, weight_ih, weight_hh, bias_concat, sequence_lens]
inputs.append(retrieve_state(h0, *state_indices))
if variant == "LSTM":
inputs.append(retrieve_state(c0, *state_indices))
extra_kwargs = {} if unidirectional else {"direction_s": "bidirectional"}
if variant == "RNN":
if bidirectional:
activation = [nonlinearity, nonlinearity]
else:
activation = [nonlinearity]
prev_output, h_out = g.op(
"RNN",
*inputs,
outputs=2,
hidden_size_i=hidden_size,
activations_s=activation,
**extra_kwargs,
)
elif variant == "GRU":
prev_output, h_out = g.op(
"GRU",
*inputs,
outputs=2,
hidden_size_i=hidden_size,
linear_before_reset_i=1,
**extra_kwargs,
)
elif variant == "LSTM":
prev_output, h_out, c_out = g.op(
"LSTM", *inputs, outputs=3, hidden_size_i=hidden_size, **extra_kwargs
)
if bidirectional:
# The ONNX RNN/GRU/LSTM produce an output of dimensions
# seq_len, num_directions, batch, hidden_size
# We have to convert to match pytorch's expected
# seq_len, batch, num_directions * hidden_size
# by first moving num_directions before hidden_size with
# Transpose, and then combining it with hidden_size
# with Reshape.
prev_output = g.op("Transpose", prev_output, perm_i=[0, 2, 1, 3])
prev_output = symbolic_helper._reshape_helper(
g,
prev_output,
g.op("Constant", value_t=torch.LongTensor([0, 0, -1])),
allowzero=0,
)
else:
prev_output = symbolic_helper._squeeze_helper(g, prev_output, [1])
h_outs.append(h_out)
if variant == "LSTM":
c_outs.append(c_out)
if batch_first:
# seq, batch, num_directions * hidden_size -> batch, seq, num_directions * hidden_size
prev_output = g.op("Transpose", prev_output, perm_i=[1, 0, 2])
h_outs = h_out if num_layers == 1 else g.op("Concat", *h_outs, axis_i=0)
if variant == "RNN" or variant == "GRU":
return prev_output, h_outs
elif variant == "LSTM":
c_outs = c_out if num_layers == 1 else g.op("Concat", *c_outs, axis_i=0)
return prev_output, h_outs, c_outs
@symbolic_helper.parse_args("v", "v", "v", "i", "i", "f", "i", "i", "i")
def _lstm_full(
g,
input,
hidden_v,
weight_v,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
):
hidden, weight = symbolic_helper._unpack_list(
hidden_v
), symbolic_helper._unpack_list(weight_v)
return _generic_rnn(
g,
"LSTM",
input,
hidden,
weight,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
)
@symbolic_helper.parse_args("v", "v", "v", "v", "i", "i", "f", "i", "i")
def _lstm_packed(
g,
input,
batch_sizes,
hidden_v,
weight_v,
has_biases,
num_layers,
dropout,
train,
bidirectional,
):
hidden, weight = symbolic_helper._unpack_list(
hidden_v
), symbolic_helper._unpack_list(weight_v)
return _generic_rnn(
g,
"LSTM",
input,
hidden,
weight,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_sizes=batch_sizes,
)
def lstm(g, *args):
if symbolic_helper._is_tensor_list(args[3]):
return _lstm_packed(g, *args)
else:
return _lstm_full(g, *args)
def lstm_cell(g, self, hidden, w_ih, w_hh, b_ih, b_hh):
input = symbolic_helper._unsqueeze_helper(g, self, [0])
hidden = symbolic_helper._unpack_list(hidden)
hidden = [symbolic_helper._unsqueeze_helper(g, x, [0]) for x in hidden]
weight = (
(w_ih, w_hh, b_ih, b_hh) if symbolic_helper._is_tensor(b_ih) else (w_ih, w_hh)
)
has_biases = True if symbolic_helper._is_tensor(b_ih) else False
_, h_outs, c_outs = _generic_rnn(
g,
"LSTM",
input,
hidden,
weight,
has_biases,
num_layers=1,
dropout=0,
train=0,
bidirectional=False,
batch_first=False,
)
return symbolic_helper._squeeze_helper(
g, h_outs, [0]
), symbolic_helper._squeeze_helper(g, c_outs, [0])
def _one_hidden_rnn(kind):
@symbolic_helper.parse_args("v", "v", "v", "i", "i", "f", "i", "i", "i")
def _rnn_full(
g,
input,
hidden,
weight_v,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
):
weight = symbolic_helper._unpack_list(weight_v)
return _generic_rnn(
g,
kind,
input,
hidden,
weight,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first,
)
@symbolic_helper.parse_args("v", "v", "v", "v", "i", "i", "f", "i", "i")
def _rnn_packed(
g,
input,
batch_sizes,
hidden,
weight_v,
has_biases,
num_layers,
dropout,
train,
bidirectional,
):
weight = symbolic_helper._unpack_list(weight_v)
return _generic_rnn(
g,
kind,
input,
hidden,
weight,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_sizes=batch_sizes,
)
def symbolic(g, *args):
if symbolic_helper._is_tensor_list(args[3]):
return _rnn_packed(g, *args)
else:
return _rnn_full(g, *args)
return symbolic
gru = _one_hidden_rnn("GRU")
rnn_tanh = _one_hidden_rnn("RNN_TANH")
rnn_relu = _one_hidden_rnn("RNN_RELU")
@symbolic_helper.parse_args("v", "i")
def _dim_arange(g, like, dim):
like_shape = g.op("Shape", like)
stop = g.op(
"Gather", like_shape, g.op("Constant", value_t=torch.tensor(dim)), axis_i=0
)
if symbolic_helper.is_caffe2_aten_fallback():
return g.op("_caffe2::Range", stop)
else:
# aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
return arange(g, stop, 4, None, None, None)
def detach(g, input):
# Erase aten::detach nodes because ONNX is inference only
return input
@symbolic_helper.parse_args("v", "i")
def contiguous(g, input, memory_format):
    if memory_format > 2:  # allowed values are any, preserve and contiguous_format
raise RuntimeError("onnx memory_format support is not implemented")
return input
@symbolic_helper.parse_args("v", "v", "i")
def _pack_padded_sequence(g, input, lengths, batch_first):
# Currently there is no PackPadded operator in ONNX. We rely on an
# optimization pass to remove this later. It is an error if all
# PackPadded operators cannot be optimized out.
if batch_first:
input = g.op("Transpose", input, perm_i=[1, 0, 2])
if not lengths.type().isSubtypeOf(torch._C.TensorType.get()):
raise RuntimeError("Lengths must be a Tensor for ONNX export")
# We know it's a TensorType so this check is now safe.
# It's really only necessary because those operators expand to something that
# only works with int32 types in Caffe2...
if lengths.type().scalarType() != "Int":
lengths = _cast_Int(g, lengths, False) # type: ignore[name-defined]
return g.op("prim::PackPadded", input, lengths, outputs=2)
@symbolic_helper.parse_args("v", "v", "i", "t", "v")
def _pad_packed_sequence(
g, data, batch_sizes, batch_first, padding_value, total_length
):
# Ignore total_length as it is not supported in _symbolic_pad_packed_sequence
    # It is only useful/used when training with a data_parallel model, so
# It shouldn't be relevant for ONNX anyway
data, lengths = g.op("prim::PadPacked", data, batch_sizes, outputs=2)
if batch_first:
data = g.op("Transpose", data, perm_i=[1, 0, 2])
return data, lengths
def randn(g, shapes, dtype, *options):
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
shape = symbolic_helper._maybe_get_const(shapes, "is")
if symbolic_helper._is_value(shape):
shape_const = g.op(
"ConstantOfShape",
shapes,
value_t=torch.tensor([0], dtype=torch.float),
)
return g.op(
"RandomNormalLike",
shape_const,
dtype_i=scalar_type.onnx_type(),
)
return g.op(
"RandomNormal",
shape_i=shape,
dtype_i=scalar_type.onnx_type(),
)
def rand(g, shapes, dtype, *options):
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
shape = symbolic_helper._maybe_get_const(shapes, "is")
if symbolic_helper._is_value(shape):
shape_const = g.op(
"ConstantOfShape",
shapes,
value_t=torch.tensor([0], dtype=torch.float),
)
return g.op(
"RandomUniformLike",
shape_const,
dtype_i=scalar_type.onnx_type(),
)
return g.op(
"RandomUniform",
shape_i=shape,
dtype_i=scalar_type.onnx_type(),
)
def randn_like(
g, self, dtype, layout=None, device=None, pin_memory=False, memory_format=None
):
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
return g.op("RandomNormalLike", self, dtype_i=scalar_type.onnx_type())
def rand_like(
g, self, dtype, layout=None, device=None, pin_memory=False, memory_format=None
):
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
if dtype is None:
dtype = _type_utils.JitScalarType.FLOAT
return g.op(
"RandomUniformLike", self, dtype_i=_type_utils.JitScalarType(dtype).onnx_type()
)
@symbolic_helper.parse_args("v", "f", "f", "i", "none")
def rrelu(g, input, lower, upper, training, generator):
if not training:
slope = (upper + lower) / 2.0
return g.op("LeakyRelu", input, alpha_f=slope)
p = g.op("RandomUniformLike", input, high_f=upper, low_f=lower)
return g.op("PRelu", input, p)
def bernoulli(g, input, generator=None, out=None):
if out is not None:
symbolic_helper._unimplemented(
"Bernoulli", "out parameter is not supported for bernoulli"
)
if generator is not None and not symbolic_helper._is_none(generator):
symbolic_helper._unimplemented(
"Bernoulli", "generator is not supported for bernoulli"
)
dtype = symbolic_helper._try_get_scalar_type(input)
if dtype is None:
return symbolic_helper._unimplemented("Bernoulli", "input dtype not accessible")
p = g.op(
"RandomUniformLike",
input,
high_f=1.0,
low_f=0.0,
dtype_i=_type_utils.JitScalarType.from_name(dtype).onnx_type(),
)
output = g.op("Less", p, input)
return g.op(
"Cast", output, to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type()
)
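# Illustrative sketch: Bernoulli sampling is emulated by drawing a uniform
# tensor p ~ U[0, 1) with the same shape as the input probabilities and
# returning (p < input). For an entry with probability 0.7, the comparison is
# true with probability 0.7, matching torch.bernoulli in expectation.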
@symbolic_helper.parse_args("v")
def log_sigmoid(g, input):
p = g.op("Sigmoid", input)
return g.op("Log", p)
@symbolic_helper.parse_args("v")
def erf(g, input):
return g.op("Erf", input)
@symbolic_helper.quantized_args(True, False, False)
@symbolic_helper.parse_args("v", "i", "i")
def flatten(g, input, start_dim, end_dim):
dim = symbolic_helper._get_tensor_rank(input)
if dim is None:
return symbolic_helper._unimplemented(
"dim",
"ONNX and PyTorch use different strategies to split the input. "
"Input rank must be known at export time.",
)
# TODO: remove this as onnx opset 11 spec allows negative axes
if end_dim < 0:
end_dim = dim + end_dim
# use ONNX's Flatten operator for cases where the output shape is 2D
if start_dim == 1 and end_dim == dim - 1:
return g.op("Flatten", input, axis_i=start_dim)
if start_dim == 0 and end_dim == dim - 2:
return g.op("Flatten", input, axis_i=end_dim + 1)
return symbolic_helper._flatten_helper(g, input, start_dim, end_dim, dim)
@symbolic_helper.parse_args("v")
def nonzero(g, input):
"""Emitted from `torch.nonzero(x, as_tuple=False)`"""
return t(g, g.op("NonZero", input))
# Emitted from `torch.nonzero(x, as_tuple=True)`
def nonzero_numpy(g, input, _outputs=None):
return unbind(g, nonzero(g, input), 1, _outputs=_outputs)
@symbolic_helper.parse_args("v")
def isnan(g, input):
output = g.op("IsNaN", input)
return output
def _any(g, *args):
# aten::any(Tensor self)
if len(args) == 1:
input = args[0]
dim, keepdim = None, 0
# aten::any(Tensor self, int dim, bool keepdim)
else:
input, dim, keepdim = args
dim = [symbolic_helper._parse_arg(dim, "i")]
keepdim = symbolic_helper._parse_arg(keepdim, "i")
input = _cast_Long(g, input, False) # type: ignore[name-defined]
input_sum = symbolic_helper._reducesum_helper(
g, input, axes_i=dim, keepdims_i=keepdim
)
return gt(g, input_sum, g.op("Constant", value_t=torch.tensor(0, dtype=torch.long)))
def _all(g, *args):
input = g.op("Not", args[0])
# aten::all(Tensor self)
if len(args) == 1:
return g.op("Not", _any(g, input))
# aten::all(Tensor self, int dim, bool keepdim)
else:
return g.op("Not", _any(g, input, args[1], args[2]))
@symbolic_helper.parse_args("v", "i", "i", "i")
def narrow(g, input, dim, start, length):
return symbolic_helper._slice_helper(
g, input, axes=[dim], starts=[start], ends=[start + length]
)
@symbolic_helper.parse_args("v", "v", "i")
def argmax(g, input: torch._C.Value, dim: torch._C.Value, keepdim: int):
return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMax")
@symbolic_helper.parse_args("v", "v", "i")
def argmin(g, input: torch._C.Value, dim: torch._C.Value, keepdim: int):
return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMin")
@symbolic_helper.parse_args("v", "i", "v", "v")
def scatter(g, self, dim, index, src):
src_type = src.type().scalarType()
src = symbolic_helper._maybe_get_scalar(src)
if symbolic_helper._is_value(src):
return g.op("Scatter", self, index, src, axis_i=dim)
else:
# Check if scalar "src" has same type as self (PyTorch allows different
# type for scalar src (but not when src is tensor)). If not, insert Cast node.
if self.type().scalarType() != src_type:
src = g.op(
"Cast",
src,
to_i=_type_utils.JitScalarType.from_name(
self.type().scalarType()
).onnx_type(),
)
return g.op("Scatter", self, index, expand_as(g, src, index), axis_i=dim)
@symbolic_helper.parse_args("v", "i", "v", "v")
def scatter_add(g, self, dim, index, src):
scalar_name = symbolic_helper._try_get_scalar_type(self)
if scalar_name is None:
return symbolic_helper._unimplemented(
"scatter_add", "input dtype not accessible"
)
scalar_type = _type_utils.JitScalarType.from_name(scalar_name)
sizes = symbolic_helper._get_tensor_sizes(self, allow_nonstatic=False)
if sizes:
to_add = g.op("Constant", value_t=torch.zeros(sizes, dtype=scalar_type.dtype()))
else:
to_add = zeros_like(g, self, scalar_type)
to_add = symbolic_helper._scatter_helper(g, to_add, dim, index, src)
return add(g, self, to_add)
def log2(g, self):
_ln2 = 0.693147180559945309
return g.op("Div", log(g, self), g.op("Constant", value_t=torch.tensor(_ln2)))
def is_floating_point(g, self):
if symbolic_helper._is_fp(self):
return g.op("Constant", value_t=torch.BoolTensor([1]))
return g.op("Constant", value_t=torch.BoolTensor([0]))
def __is_(g, self, other):
if symbolic_helper._is_none(other):
if symbolic_helper._is_none(self):
return g.op("Constant", value_t=torch.BoolTensor([1]))
return g.op("Constant", value_t=torch.BoolTensor([0]))
return eq(g, self, other)
@wrap_logical_op_with_negation
def __isnot_(g, self, other):
return __is_(g, self, other)
def one_hot(g, self, num_classes):
values = g.op("Constant", value_t=torch.LongTensor([0, 1]))
# onnxruntime supports limited type combinations for OneHot.
if num_classes.type().scalarType() in {"Byte", "Char", "Int", "Short"}:
num_classes = g.op("Cast", num_classes, to_i=_C_onnx.TensorProtoDataType.INT64)
return g.op("OneHot", self, num_classes, values, axis_i=-1)
@symbolic_helper.parse_args("v", "i", "v", "v")
def gather(g, self, dim, index, sparse_grad=False):
if symbolic_helper._maybe_get_const(sparse_grad, "i"):
return symbolic_helper._unimplemented("gather", "sparse_grad == True")
    # NOTE: This workaround is needed since GatherElements is only supported
# since opset 11, and Gather in ONNX is not the same as torch.gather.
dtype = self.type().scalarType()
values = g.op("Constant", value_t=torch.LongTensor([0, 1]))
depth = size(g, self, g.op("Constant", value_t=torch.LongTensor([dim])))
index = g.op(
"Cast",
g.op("OneHot", index, depth, values, axis_i=dim),
to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type(),
)
mul = g.op("Mul", symbolic_helper._unsqueeze_helper(g, self, [dim + 1]), index)
return symbolic_helper._reducesum_helper(g, mul, axes_i=[dim], keepdims_i=0)
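# Illustrative sketch of the OneHot-based gather emulation above (hypothetical
# values): with self = [[1, 2], [3, 4]], dim = 1 and index = [[1], [0]], the
# index is one-hot encoded along dim (depth 2), multiplied against the
# unsqueezed input, and reduce-summed over dim, yielding [[2], [3]] -- the same
# result as torch.gather(self, 1, index).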
@symbolic_helper.parse_args("v", "is", "i", "i")
def _var_mean(g, input, dim, correction, keepdim):
if dim is None:
mean = g.op("ReduceMean", input, keepdims_i=0)
t_mean = mean
num_elements = numel(g, input)
else:
mean = g.op("ReduceMean", input, axes_i=dim, keepdims_i=keepdim)
t_mean = g.op("ReduceMean", input, axes_i=dim, keepdims_i=1)
redudced_dims = g.op("Shape", input)
# dim could contain one or multiple dimensions
redudced_dims = g.op(
"Gather",
redudced_dims,
g.op("Constant", value_t=torch.tensor(dim)),
axis_i=0,
)
num_elements = g.op("ReduceProd", redudced_dims, keepdims_i=0)
sub_v = g.op("Sub", input, t_mean)
sqr_sub = g.op("Mul", sub_v, sub_v)
keepdim_mean = 0 if dim is None else keepdim
var = g.op("ReduceMean", sqr_sub, axes_i=dim, keepdims_i=keepdim_mean)
    # Correct the bias in the variance calculation by dividing by (N - correction) instead of N
if correction is None:
correction = 1
if correction != 0:
num_elements = g.op(
"Cast", num_elements, to_i=_C_onnx.TensorProtoDataType.FLOAT
)
one = g.op("Constant", value_t=torch.tensor(correction, dtype=torch.float))
mul = g.op("Mul", var, num_elements)
var = g.op("Div", mul, g.op("Sub", num_elements, one))
return var, mean
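# Illustrative sketch of the correction applied above: the biased variance
# E[(x - mean)^2] is rescaled by N / (N - correction), so correction = 1
# reproduces torch.var_mean's default unbiased (Bessel-corrected) estimate.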
def std(g, input, *args):
var, _ = var_mean(g, input, *args)
return g.op("Sqrt", var)
def var(g, input, *args):
var, _ = var_mean(g, input, *args)
return var
# var_mean (and all variance-related functions) has multiple signatures, so we need to manually figure
# out the correct arguments:
# aten::var_mean(Tensor self, bool unbiased)
# aten::var_mean(Tensor self, int[1] dim, bool unbiased, bool keepdim=False)
# aten::var_mean(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False)
def var_mean(g, input, *args):
if len(args) == 1:
return _var_mean(g, input, None, args[0], None)
else:
return _var_mean(g, input, *args)
def std_mean(g, input, *args):
var, mean = var_mean(g, input, *args)
return g.op("Sqrt", var), mean
@symbolic_helper.parse_args("v", "is", "i")
def logsumexp(g, input, dim, keepdim):
return g.op("ReduceLogSumExp", input, axes_i=dim, keepdims_i=keepdim)
def arange(g, *args):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("arange", *args)
def _get_arange_dtype(dtype):
dtype = symbolic_helper._maybe_get_const(dtype, "i")
return dtype
def _float_step_convert(range_tensor):
if symbolic_helper._is_fp(range_tensor):
range_tensor = g.op(
"Cast",
g.op("Ceil", range_tensor),
to_i=_type_utils.JitScalarType.INT64.onnx_type(),
)
return range_tensor
if len(args) == 2 or len(args) == 5:
if len(args) == 2:
# aten::arange(Scalar end, Tensor out)
dtype = None
else:
# aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
dtype = _get_arange_dtype(args[1])
dtype, end, start, step = symbolic_helper._arange_cast_helper(
g, end=args[0], dtype=dtype
)
end = symbolic_helper._unsqueeze_helper(g, end, [0])
range_tensor = _float_step_convert(end)
arange_tensor = symbolic_helper._squeeze_helper(
g, nonzero(g, ones(g, range_tensor, dtype, None, None)), [1]
)
return g.op(
"Cast", arange_tensor, to_i=_type_utils.JitScalarType(dtype).onnx_type()
)
elif len(args) == 4 or len(args) == 7:
if len(args) == 4:
# aten::arange(Scalar start, Scalar end, Scalar step, Tensor out)
dtype = None
else:
# aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory)
dtype = _get_arange_dtype(args[3])
dtype, end, start, step = symbolic_helper._arange_cast_helper(
g, start=args[0], end=args[1], step=args[2], dtype=dtype
)
step = symbolic_helper._unsqueeze_helper(g, step, [0])
end = symbolic_helper._unsqueeze_helper(g, end, [0])
start = symbolic_helper._unsqueeze_helper(g, start, [0])
range_tensor = _float_step_convert(g.op("Div", g.op("Sub", end, start), step))
arange_tensor = symbolic_helper._squeeze_helper(
g, nonzero(g, ones(g, range_tensor, None, None, None)), [1]
)
arange_tensor = g.op("Add", g.op("Mul", arange_tensor, step), start)
return g.op(
"Cast", arange_tensor, to_i=_type_utils.JitScalarType(dtype).onnx_type()
)
elif len(args) == 6:
# aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
dtype = _get_arange_dtype(args[2])
dtype, end, start, step = symbolic_helper._arange_cast_helper(
g, start=args[0], end=args[1], dtype=dtype
)
end = symbolic_helper._unsqueeze_helper(g, end, [0])
start = symbolic_helper._unsqueeze_helper(g, start, [0])
range_tensor = _float_step_convert(g.op("Sub", end, start))
arange_tensor = g.op(
"Add",
symbolic_helper._squeeze_helper(
g, nonzero(g, ones(g, range_tensor, dtype, *(args[3:]))), [1]
),
start,
)
return g.op(
"Cast", arange_tensor, to_i=_type_utils.JitScalarType(dtype).onnx_type()
)
else:
raise NotImplementedError(
"Unknown aten::arange signature taking " + str(len(args)) + " arguments."
)
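# Illustrative sketch of the trick used above: lacking a Range op in opset 9,
# arange builds a ones tensor whose length is (end - start) / step (ceiled for
# floating-point ranges), uses NonZero to enumerate positions 0..N-1, then
# scales by `step` and offsets by `start` before casting to the target dtype.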
def linspace(g, start, end, steps, dtype, layout, device, pin_memory):
range_tensor = symbolic_helper._arange_helper(g, steps, None)
step = div(
g,
sub(g, end, start),
sub(g, steps, g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))),
)
return add(g, mul(g, range_tensor, step), start)
def lift(g, self):
# at::lift() is a no-op from the perspective of tracing for onnx
return self
def masked_fill(g, self, mask, value):
mask = _cast_Bool(g, mask, False) # type: ignore[name-defined]
value = symbolic_helper._maybe_get_scalar(value)
return g.op("Where", mask, symbolic_helper._if_scalar_type_as(g, value, self), self)
def index(g, self, index):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("index", self, index, overload_name="Tensor")
if symbolic_helper._is_packed_list(index):
indices = symbolic_helper._unpack_list(index)
else:
indices = [index]
def try_mask_to_index(index):
if not symbolic_helper._is_none(index) and (
index.type().scalarType() == "Byte" or index.type().scalarType() == "Bool"
):
if GLOBALS.export_onnx_opset_version < 9:
raise RuntimeError(
"Exporting masked indices are only supported after ONNX opset 9."
)
warnings.warn(
"Exporting aten::index operator with indices of type Byte. "
"Only 1-D indices are supported. In any other case, "
"this will produce an incorrect ONNX graph."
)
index = symbolic_helper._squeeze_helper(g, nonzero(g, index), [1])
return index
indices = [try_mask_to_index(idx) for idx in indices]
if len(indices) == 1:
return symbolic_helper._select_helper(
g, self, 0, indices[0], apply_reshape=False
)
else:
# Multiple tensors as indices. Each tensor could either be
# 1. prim::Constant()
# representing ":" in python indexing. E.g. tensor[:, :]
# 2. prim::Constant[value=...] or tensor output
# representing advanced indexing. E.g. tensor[[0, 1], [2, 0]].
# For more info on advanced indexing,
# check https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
# Consider a general case of
# t: [x_1, y_1, y_2, ..., x_m, ..., y_n]
# where t is a tensor of rank m+n, {x_i} are axes where tensor index is provided, and {y_i} are axes for ":".
# Same results can be achieved through transposing t into
# t: [x_1, x_2, ..., x_m, y_1, y_2, ..., y_n]
        # and use GatherND. However, ONNX does not have GatherND, so to use 1-D Gather we'll need to flatten t
# and process the tensor indices.
# t: [x_1 * x_2 * ... * x_m, y_1 * y_2 * ... * y_n]
# tensor index = \sum_{i=1}^m (ind_i * \prod_{j=i+1}^m (x_j))
# After gather, reshape and transpose back.
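        # Illustrative sketch of the flattening math above (hypothetical values):
        # for t of shape (4, 5, 6) indexed as t[idx0, :, idx2], the advanced axes
        # (0 and 2) are transposed to the front, t is flattened to (4 * 6, 5), and
        # the combined 1-D index is idx0 * 6 + idx2; a single Gather then selects
        # the rows, after which the result is reshaped (and, for consecutive
        # advanced axes, transposed) back into the expected output shape.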
adv_idx_indices = [
i for i, idx in enumerate(indices) if not symbolic_helper._is_none(idx)
]
if len(adv_idx_indices) == 0:
return self
elif len(adv_idx_indices) == 1:
return index_select(
g, self, adv_idx_indices[0], indices[adv_idx_indices[0]]
)
else:
rank = symbolic_helper._get_tensor_rank(self)
if rank is None:
raise NotImplementedError(
"Unsupported aten::index operator of advanced indexing on tensor of unknown rank. "
+ "Try turning on shape inference during export: "
+ "torch.onnx._export(..., onnx_shape_inference=True)."
)
# TODO: If indexing is supported natively in ONNX in future opsets,
# update the warning to recommend exporting with higher opset version.
warnings.warn(
"Exporting aten::index operator of advanced indexing in opset "
+ str(GLOBALS.export_onnx_opset_version)
+ " is achieved by combination of multiple ONNX operators, "
+ "including Reshape, Transpose, Concat, and Gather. "
+ "If indices include negative values, the exported graph will produce incorrect results."
)
adv_idx_count = len(adv_idx_indices)
shape_tensor = _shape_as_tensor(g, self)
dim_tensor_list = [
g.op(
"Gather",
shape_tensor,
g.op("Constant", value_t=torch.LongTensor([dim])),
axis_i=0,
)
for dim in range(rank)
]
self = g.op(
"Transpose",
self,
perm_i=adv_idx_indices
+ [i for i in range(rank) if i not in adv_idx_indices],
)
self = g.op("Flatten", self, axis_i=adv_idx_count)
# Note that tensor indices will be broadcasted while accumulating. Thus we get the final subarray shape as well.
cum_adv_index = indices[adv_idx_indices[-1]]
multiplier = dim_tensor_list[adv_idx_indices[-1]]
for i in range(adv_idx_count - 2, -1, -1):
adv_index = g.op("Mul", indices[adv_idx_indices[i]], multiplier)
cum_adv_index = g.op("Add", cum_adv_index, adv_index)
multiplier = g.op(
"Mul", multiplier, dim_tensor_list[adv_idx_indices[i]]
)
# perform gather
self = index_select(g, self, 0, cum_adv_index)
cum_adv_index_shape_tensor = _shape_as_tensor(g, cum_adv_index)
# check if all advanced indices are consecutive.
# Refer to https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#combining-advanced-and-basic-indexing
# to understand how the subarray position is decided.
if adv_idx_indices == list(
range(adv_idx_indices[0], adv_idx_indices[-1] + 1)
):
# unfold regular index axes
folded_adv_idx_shape_list = [
g.op("Constant", value_t=torch.LongTensor([-1]))
] + [
dim_tensor_list[i] for i in range(rank) if i not in adv_idx_indices
]
folded_adv_idx_shape = g.op(
"Concat", *folded_adv_idx_shape_list, axis_i=0
)
self = symbolic_helper._reshape_helper(g, self, folded_adv_idx_shape)
# Transpose folded advanced indexed axis to its original location.
adv_idx_permute = (
list(range(1, adv_idx_indices[0] + 1))
+ [0]
+ list(range(adv_idx_indices[0] + 1, rank - adv_idx_count + 1))
)
self = g.op("Transpose", self, perm_i=adv_idx_permute)
# unfold advanced index axes
final_shape_list = (
[dim_tensor_list[i] for i in range(adv_idx_indices[0])]
+ [cum_adv_index_shape_tensor]
+ [
dim_tensor_list[i]
for i in range(adv_idx_indices[0], rank)
if i not in adv_idx_indices
]
)
final_shape = g.op("Concat", *final_shape_list, axis_i=0)
else:
final_shape = g.op(
"Concat",
cum_adv_index_shape_tensor,
*[
dim_tensor_list[i]
for i in range(rank)
if i not in adv_idx_indices
],
axis_i=0,
)
return symbolic_helper._reshape_helper(g, self, final_shape)
@symbolic_helper.parse_args("v", "v", "is", "i", "v")
def linalg_norm(
g,
self: torch._C.Value,
ord: torch._C.Value,
dim: List[int],
keepdim: int,
dtype: torch._C.Value,
):
# Conditions based on https://pytorch.org/docs/stable/generated/torch.linalg.norm.html
ord_value = None
if dim is None:
if symbolic_helper._is_none(ord):
self = symbolic_helper._reshape_helper(g, self, [-1])
ord = g.op("Constant", value_t=torch.LongTensor([2]))
self_dim = symbolic_helper._get_tensor_rank(self)
if self_dim is None:
return symbolic_helper._unimplemented(
"dim", "Input rank must be known at export time."
)
if self_dim == 1:
ord_value = symbolic_helper._parse_arg(ord, "f")
else:
dim = [0, 1]
else:
if len(dim) == 1:
if symbolic_helper._is_none(ord):
ord = g.op("Constant", value_t=torch.LongTensor([2]))
ord_value = symbolic_helper._parse_arg(ord, "f")
if ord_value:
return linalg_vector_norm(g, self, ord_value, dim, keepdim, dtype)
return linalg_matrix_norm(g, self, ord, dim, keepdim, dtype)
@symbolic_helper.parse_args("v", "f", "is", "i", "v")
def linalg_vector_norm(
g,
self: torch._C.Value,
ord: float,
dim: List[int],
keepdim: int,
dtype: torch._C.Value,
):
# Conditions based on https://pytorch.org/docs/stable/generated/torch.linalg.vector_norm.html
if dim is None:
self = symbolic_helper._reshape_helper(g, self, [-1])
keepdim = 0
if ord == math.inf:
result = g.op("ReduceMax", g.op("Abs", self), axes_i=dim, keepdims_i=keepdim)
elif ord == -math.inf:
result = g.op("ReduceMin", g.op("Abs", self), axes_i=dim, keepdims_i=keepdim)
elif ord == 0:
return symbolic_helper._onnx_opset_unsupported_detailed(
"linalg_vector_norm", 9, 11, "ord=0 not supported"
)
else:
ord_op = g.op("Constant", value_t=torch.tensor(ord, dtype=torch.float32))
result = symbolic_helper._reducesum_helper(
g, g.op("Pow", g.op("Abs", self), ord_op), axes_i=dim, keepdims_i=keepdim
)
result = g.op(
"Pow",
result,
g.op(
"Div",
g.op("Constant", value_t=torch.tensor(1, dtype=torch.float32)),
ord_op,
),
)
return result
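# Illustrative sketch of the general case above: for finite nonzero ord p,
# ||x||_p = (sum_i |x_i|^p)^(1/p), which is what the Pow / ReduceSum / Pow
# chain computes; ord = +/-inf instead reduces to the max/min of |x_i|.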
@symbolic_helper.parse_args("v", "v", "is", "i", "v")
def linalg_matrix_norm(
g,
self: torch._C.Value,
ord: torch._C.Value,
dim: List[int],
keepdim: int,
dtype: torch._C.Value,
):
# Conditions based on https://pytorch.org/docs/stable/generated/torch.linalg.matrix_norm.html
ord_value = symbolic_helper._parse_arg(ord, "s")
if ord_value == "fro":
return frobenius_norm(g, self, dim, keepdim)
elif ord_value == "nuc":
return symbolic_helper._unimplemented("linalg.matrix_norm", "ord==nuc")
else:
ord_value = symbolic_helper._parse_arg(ord, "f")
if ord_value is None:
return frobenius_norm(g, self, dim, keepdim)
if ord_value == 2 or ord_value == -2:
# ord = 2/-2 unimplemented due to lack of operators
# used to calculate singular values
return symbolic_helper._unimplemented("linalg.matrix_norm", "ord==2")
        # Wrap the dim vector to handle negative dim values
self_dim = symbolic_helper._get_tensor_rank(self)
if self_dim is None:
return symbolic_helper._unimplemented(
"linalg.matrix_norm", "Input rank must be known at export time."
)
# Common implementation for cases with
# ord = 1/-1 and ord = inf/-inf
if dim[0] < 0:
dim[0] += self_dim
if dim[1] < 0:
dim[1] += self_dim
if ord_value == math.inf or ord_value == -math.inf:
dim[0], dim[1] = dim[1], dim[0]
if dim[1] > dim[0] and not keepdim:
dim[1] -= 1
sum = symbolic_helper._reducesum_helper(
g, g.op("Abs", self), axes_i=[dim[0]], keepdims_i=keepdim
)
if ord_value > 0:
result, indices = max(
g,
sum,
dim_or_y=g.op("Constant", value_t=torch.LongTensor([dim[1]])),
keepdim=keepdim,
)
else:
result, indices = min(
g,
sum,
dim_or_y=g.op("Constant", value_t=torch.LongTensor([dim[1]])),
keepdim=keepdim,
)
return result
@symbolic_helper.parse_args("v", "v", "i")
def linalg_cross(g, input, other, dim=-1):
return cross(g, input, other, dim)
@symbolic_helper.parse_args("v", "is", "i")
def frobenius_norm(g, self, dim=None, keepdim=False):
sqr = g.op("Mul", self, self)
sumsqr = symbolic_helper._reducesum_helper(g, sqr, axes_i=dim, keepdims_i=keepdim)
return g.op("Sqrt", sumsqr)
@symbolic_helper.parse_args("v", "i", "b", "v")
def multinomial(g, input, num_samples, replacement=False, generator=None):
if generator is not None and not symbolic_helper._is_none(generator):
symbolic_helper._unimplemented(
"Multinomial", "generator is not supported for multinomial"
)
if not replacement and num_samples > 1:
symbolic_helper._unimplemented(
"Multinomial",
"replacement=False when num_samples > 1 is not supported for multinomial",
)
log_input = log(g, input)
return g.op(
"Multinomial",
log_input,
dtype_i=_C_onnx.TensorProtoDataType.INT64,
sample_size_i=num_samples,
)
def baddbmm(g, self, batch1, batch2, beta, alpha):
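    # Computes beta * self + alpha * (batch1 @ batch2), matching aten::baddbmm
    # (comment added for clarity).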
dtype = self.type().scalarType()
batch_mul = matmul(g, batch1, batch2)
mul_a = mul(
g,
batch_mul,
g.op(
"Cast", alpha, to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type()
),
)
mul_b = mul(
g,
self,
g.op("Cast", beta, to_i=_type_utils.JitScalarType.from_name(dtype).onnx_type()),
)
return add(g, mul_a, mul_b)
@symbolic_helper.parse_args("v", "s")
def meshgrid(g, tensor_list, indexing: Optional[str] = None):
if indexing is None:
indexing = "ij"
elif indexing not in {"ij", "xy"}:
raise ValueError(f"Unsupported indexing: {indexing}")
if indexing == "xy":
tensor_list[0], tensor_list[1] = tensor_list[1], tensor_list[0]
tensors = [
symbolic_helper._reshape_helper(
g, t, g.op("Constant", value_t=torch.LongTensor([-1]))
)
for t in symbolic_helper._unpack_list(tensor_list)
]
tensors_shape = [g.op("Shape", t) for t in tensors]
out_shape = g.op("Concat", *tensors_shape, axis_i=0)
out = []
for i, t in enumerate(tensors):
shape_i = [g.op("Constant", value_t=torch.ones(1, dtype=torch.int64))] * len(
tensors
)
shape_i[i] = tensors_shape[i]
t_reshaped = _reshape_from_tensor(g, t, g.op("Concat", *shape_i, axis_i=0))
out.append(g.op("Expand", t_reshaped, out_shape))
if indexing == "xy":
out[0], out[1] = out[1], out[0]
return g.op("prim::ListConstruct", *out)
def remainder(g, input, other):
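    # Computes input - floor_divide(input, other) * other, i.e. a modulo whose sign
    # follows the divisor, as in torch.remainder (comment added for clarity).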
div = _floor_divide(g, input, other)
quo = g.op("Mul", div, other)
return g.op("Sub", input, quo)
@symbolic_helper.parse_args("v", "s")
def gelu(g, self: torch._C.Value, approximate: str = "none"):
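    # Reference formulas implemented below (standard GELU definitions, added for clarity):
    #   tanh approximation: 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x ** 3)))
    #   exact (erf) form:   0.5 * x * (1 + erf(x / sqrt(2)))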
if approximate == "tanh":
kBeta = math.sqrt(2 / math.pi)
kKappa = 0.044715
beta = torch.tensor(kBeta, dtype=torch.double)
kappa = torch.tensor(kKappa, dtype=torch.double)
one = torch.tensor(1.0, dtype=torch.double)
half = torch.tensor(0.5, dtype=torch.double)
self_cube = mul(g, self, mul(g, self, self))
inner = mul(g, beta, add(g, self, mul(g, kappa, self_cube)))
return mul(g, half, mul(g, self, add(g, one, g.op("Tanh", inner))))
else:
_sqrt2 = 1.4142135623730951
erf = g.op("Erf", g.op("Div", self, torch.tensor(_sqrt2, dtype=torch.double)))
erf_plusone = add(
g, erf, g.op("Constant", value_t=torch.tensor(1, dtype=torch.double))
)
return mul(
g,
mul(g, self, erf_plusone),
g.op("Constant", value_t=torch.tensor(0.5, dtype=torch.double)),
)
@symbolic_helper.parse_args("v", "i", "v", "v", "f", "i")
def group_norm(g, input, num_groups, weight, bias, eps, cudnn_enabled):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at(
"group_norm",
input,
weight,
bias,
num_groups_i=num_groups,
eps_f=eps,
cudnn_enabled_i=cudnn_enabled,
)
channel_size = symbolic_helper._get_tensor_dim_size(input, 1)
if channel_size is not None:
assert channel_size % num_groups == 0
input_rank = symbolic_helper._get_tensor_rank(input)
if input_rank is None:
return symbolic_helper._unimplemented("group_norm", "unknown input rank")
# 0 in the shape list keeps dimension value unchanged.
shape = [0, num_groups, -1]
input_reshaped = symbolic_helper._reshape_helper(
g, input, g.op("Constant", value_t=torch.LongTensor(shape))
)
# C is always divisible by num_groups
    # Due to the shape difference, we need to apply weight and bias after
# instance norm computation and reshape
weight_ = g.op(
"Constant",
value_t=torch.tensor([1.0] * num_groups).type(
"torch." + input.type().scalarType() + "Tensor"
),
)
bias_ = g.op(
"Constant",
value_t=torch.tensor([0.0] * num_groups).type(
"torch." + input.type().scalarType() + "Tensor"
),
)
norm_reshaped = g.op(
"InstanceNormalization", input_reshaped, weight_, bias_, epsilon_f=eps
)
norm = symbolic_helper._reshape_helper(g, norm_reshaped, g.op("Shape", input))
if weight is None or weight.node().mustBeNone():
weight_value = torch.tensor([1.0]).type(
"torch." + input.type().scalarType() + "Tensor"
)
weight = g.op("Constant", value_t=weight_value)
if bias is None or bias.node().mustBeNone():
bias_value = torch.tensor([0.0]).type(
"torch." + input.type().scalarType() + "Tensor"
)
bias = g.op("Constant", value_t=bias_value)
# Norm has shape [N, C, *] so we reshape weight and bias to [C, *]
axes = list(range(1, input_rank - 1))
return add(
g,
mul(g, norm, symbolic_helper._unsqueeze_helper(g, weight, axes)),
symbolic_helper._unsqueeze_helper(g, bias, axes),
)
@symbolic_helper.parse_args("v", "v", "i")
def _weight_norm(g, weight_v, weight_g, dim):
rank = symbolic_helper._get_tensor_rank(weight_v)
if rank is not None:
# W = g * ((v) / ||v||)
# Compute norm_except_dim for l2 norm. dim = None means over all dims
# torch's weight_norm module sets dim = -1 if it's None.
        # This conflicts with the logic for accessing dims backwards via negative axes.
        # TODO: Might need a fix in torch's weight_norm module
axes = list(range(rank))
if dim is not None:
if dim < -1:
dim += rank
if dim != -1:
axes.remove(dim)
norm_v = norm(g, weight_v, 2, axes, 1)
div = g.op("Div", weight_v, norm_v)
return g.op("Mul", div, weight_g)
elif symbolic_helper.is_caffe2_aten_fallback():
return g.at("_weight_norm", weight_v, weight_g, dim_i=dim)
else:
raise RuntimeError(
"Unsupported: ONNX export of _weight_norm for tensor " "of unknown rank."
)
def dim(g, self):
"""Implement the dim functionality available for a pytorch tensor in ONNX"""
# ONNX does not support dim directly in this opset so we can use 2 ops to get the info
shape = g.op("Shape", self)
return g.op("Size", shape)
def __getitem_(g, self, i):
return select(g, self, g.op("Constant", value_t=torch.tensor([0])), i)
def item(g, self):
return self
def take(g, self, index):
self_flattened = symbolic_helper._reshape_helper(
g, self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64))
)
out = index_select(g, self_flattened, 0, index)
out = reshape_as(g, out, index)
return out
def _kl_div_log_target_impl(g, input, target):
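    # With a log-space target, the pointwise KL term is exp(target) * (target - input)
    # (comment added for clarity).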
diff_ = sub(g, target, input)
exp_ = exp(g, target)
output = mul(g, exp_, diff_)
return output
def _kl_div_non_log_target_impl(g, input, target):
log_ = log(g, target)
diff_ = sub(g, log_, input)
output_pos = mul(g, target, diff_)
zeros_ = zeros_like(g, output_pos)
mask_ = gt(g, target, g.op("Constant", value_t=torch.tensor(0)))
output = where(g, mask_, output_pos, zeros_)
return output
@symbolic_helper.parse_args("v", "v", "i", "b")
def kl_div(g, input, target, reduction, log_target):
if log_target:
output = _kl_div_log_target_impl(g, input, target)
else:
output = _kl_div_non_log_target_impl(g, input, target)
if reduction == 0:
return output
elif reduction == 1:
return g.op("ReduceMean", output, keepdims_i=0)
elif reduction == 2:
return symbolic_helper._reducesum_helper(g, output, keepdims_i=0)
else:
return symbolic_helper._onnx_unsupported(
"kl_div with reduction other than none, mean, or sum."
)
@symbolic_helper.parse_args("v", "v", "is", "i")
def as_strided(g, self, sizes, strides, offset=None):
sizes = symbolic_helper._maybe_get_const(sizes, "is")
rank = len(strides)
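    # Illustrative sketch (added comment): for sizes (2, 2), strides (2, 1) and no
    # storage offset, the gather indices built below are [[0, 1], [2, 3]], i.e.
    # output element (i, j) reads flattened input position i * 2 + j * 1.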
self_1d = symbolic_helper._reshape_helper(
g, self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64))
)
ind: Optional[torch.Tensor]
if not symbolic_helper._is_value(sizes):
ind = torch.tensor([0], dtype=torch.long)
for i, (size, stride) in enumerate(zip(sizes, strides)):
r_size = [1] * rank
r_size[i] = -1
ind = ind + torch.arange(size).view(r_size) * stride
if offset:
ind = ind + offset
return g.op("Gather", self_1d, g.op("Constant", value_t=ind))
else:
ind = None
for i, stride in enumerate(strides):
r_size = [1] * rank
r_size[i] = -1
size = select(
g,
sizes,
g.op("Constant", value_t=torch.tensor([0])),
g.op("Constant", value_t=torch.tensor(i)),
)
tmp_ind = symbolic_helper._reshape_helper(
g,
arange(g, size, 4, None, None, None),
g.op("Constant", value_t=torch.tensor(r_size)),
)
tmp_ind = g.op(
"Mul", tmp_ind, g.op("Constant", value_t=torch.tensor([stride]))
)
if ind is None:
ind = tmp_ind
else:
ind = g.op("Add", ind, tmp_ind)
if offset:
ind = g.op("Add", ind, g.op("Constant", torch.tensor([offset])))
return g.op("Gather", self_1d, ind)
def __derive_index(g, index, start, step):
return g.op("Add", start, g.op("Mul", index, step))
# Source code for aten op can be found here: pytorch/torch/csrc/jit/runtime/register_prim_ops.cpp
# if (step > 0 && lo < hi) {
# push(stack, 1 + (hi - 1 - lo) / step);
# } else if (step < 0 && lo > hi) {
# push(stack, 1 + (lo - 1 - hi) / (0 - step));
# } else {
# push(stack, 0);
# }
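# For example (illustrative): lo=0, hi=5, step=2 gives 1 + (5 - 1 - 0) // 2 = 3
# elements (0, 2, 4), matching the ceil((hi - lo) / step) computed below.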
def __range_length(g, lo, hi, step):
sub = g.op("Sub", hi, lo)
div = g.op("Ceil", true_divide(g, sub, step))
return g.op("Cast", div, to_i=_C_onnx.TensorProtoDataType.INT64)
def linear(g, input, weight, bias):
rank = symbolic_helper._get_tensor_rank(input)
weight = t(g, weight)
if rank == 2 and not bias.node().mustBeNone():
alpha = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
beta = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
output = addmm(g, bias, input, weight, alpha, beta)
else:
output = matmul(g, input, weight)
if not bias.node().mustBeNone():
output = add(g, bias, output)
return output
@symbolic_helper.parse_args("v", "b", "i", "v", "v", "v", "v")
def hann_window(
g,
window_length,
periodic=True,
dtype: Optional[int] = None,
layout=None,
device=None,
pin_memory=None,
requires_grad=False,
):
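    # Computes the Hann window as sin(pi * n / M) ** 2, which equals the textbook
    # 0.5 * (1 - cos(2 * pi * n / M)); M is window_length when periodic, else
    # window_length - 1 (comment added for clarity).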
if dtype is None:
dtype_ = torch.get_default_dtype()
if not dtype_ or not dtype_.is_floating_point:
dtype_ = torch.float
scalar_type = _type_utils.JitScalarType.from_dtype(dtype_)
else:
scalar_type = _type_utils.JitScalarType(dtype)
n_array = arange(g, window_length, 4, None, None, None)
output = g.op("Cast", n_array, to_i=_C_onnx.TensorProtoDataType.FLOAT)
output = mul(
g, g.op("Constant", value_t=torch.tensor(math.pi, dtype=torch.float)), output
)
if periodic is False:
window_length = sub(
g, window_length, g.op("Constant", value_t=torch.tensor(1, dtype=torch.int))
)
output = div(g, output, window_length)
output = g.op(
"Cast",
square(g, sin(g, output)),
to_i=scalar_type.onnx_type(),
)
return output
def mv(g, self, vec):
return matmul(g, self, vec)
def dot(g, self, other):
return matmul(g, self, other)
@symbolic_helper.parse_args("v", "t", "t")
def movedim(g, self, source, destination):
# This is a pythonic implementation mostly taken from aten/src/ATen/native/TensorShape.cpp::movedim
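    # Illustrative sketch (added comment): moving dim 0 to dim 2 on a rank-3 tensor
    # produces perm = [1, 2, 0] below, i.e. the same result as x.permute(1, 2, 0).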
source = source.view(-1)
destination = destination.view(-1)
assert source.size() == destination.size()
if (source == destination).all():
return self
self_rank = symbolic_helper._get_tensor_rank(self)
assert self_rank is not None
perm = list(range(self_rank))
src_dims = perm.copy()
dst_dims = perm.copy()
for src, dst in zip(source.tolist(), destination.tolist()):
perm[dst] = src
src_dims[src] = -1
dst_dims[dst] = -1
src_dims = [dim for dim in src_dims if dim != -1]
dst_dims = [dim for dim in dst_dims if dim != -1]
for src, dst in zip(src_dims, dst_dims):
perm[dst] = src
return g.op("Transpose", self, perm_i=perm)
@symbolic_helper.parse_args("v", "v")
def fill(g, self, value):
dtype = self.type().scalarType()
if dtype is None:
dtype = _type_utils.JitScalarType.FLOAT
else:
dtype = _type_utils.JitScalarType.from_name(dtype)
return full_like(g, self, value, dtype)
def index_add(g, self, dim, index, other, alpha=None):
warnings.warn(
"Warning: ONNX export does not support duplicated values in 'index' field, "
+ "this will cause the ONNX model to be incorrect."
)
# ONNX does not support "alpha" argument, unlike aten index_add
# See: https://github.com/pytorch/pytorch/pull/65993#issuecomment-953151102 for more context
if alpha and symbolic_helper._scalar(symbolic_helper._maybe_get_scalar(alpha)) != 1:
return symbolic_helper._unimplemented("index_add", "alpha != 1")
dim = symbolic_helper._maybe_get_const(dim, "i")
if dim is None:
raise NotImplementedError(
"ONNX export does NOT support exporting 'index_add_()' function with "
+ "unknown 'dim' value."
)
self_dim_rank = symbolic_helper._get_tensor_rank(self)
other_dim_rank = symbolic_helper._get_tensor_rank(other)
if self_dim_rank is None or other_dim_rank is None:
raise NotImplementedError(
"ONNX export does NOT support exporting 'index_add_()' function while "
+ "the rank of self tensor or tensor to be added is unknown."
)
if other_dim_rank != self_dim_rank:
delta = self_dim_rank - other_dim_rank
for i in range(delta):
other = symbolic_helper._unsqueeze_helper(
g, other, [symbolic_helper._get_tensor_rank(other)]
)
other_dim_size = symbolic_helper._get_tensor_dim_size(other, dim)
self_dim_size = symbolic_helper._get_tensor_dim_size(self, dim)
if (other_dim_size is not None) and (self_dim_size is not None):
if other_dim_size > self_dim_size:
raise NotImplementedError(
"ONNX export does NOT support exporting 'index_add_()' function with "
+ "duplicated values in 'index' parameter yet."
)
# Construct a new shape. It's almost as same as self except the size of the 'dim'
# dimension is 1, so that we can expand other dimensions as expected.
new_shape_axes = list(range(self_dim_rank))
new_shape_starts = [0 for i in range(self_dim_rank)]
new_shape_ends = [sys.maxsize if (i != dim) else 1 for i in range(self_dim_rank)]
new_shape = symbolic_helper._slice_helper(
g, self, axes=new_shape_axes, starts=new_shape_starts, ends=new_shape_ends
)
other = expand_as(g, other, new_shape)
for i in range(dim):
index = symbolic_helper._unsqueeze_helper(g, index, [0])
for i in range(self_dim_rank - dim - 1):
index = symbolic_helper._unsqueeze_helper(
g, index, [symbolic_helper._get_tensor_rank(index)]
)
return scatter_add(g, self, dim, expand_as(g, index, other), other)
@symbolic_helper.parse_args("v", "is", "is")
def roll(g, self, shifts, dims):
assert len(shifts) == len(dims)
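    # Illustrative sketch (added comment): rolling [1, 2, 3, 4] by shift 1 slices the
    # last element [4] and the first three [1, 2, 3], then concatenates them into
    # [4, 1, 2, 3], matching torch.roll.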
result = self
for i in range(len(shifts)):
shapes = []
shape = symbolic_helper._slice_helper(
g, result, axes=[dims[i]], starts=[-shifts[i]], ends=[sys.maxsize]
)
shapes.append(shape)
shape = symbolic_helper._slice_helper(
g, result, axes=[dims[i]], starts=[0], ends=[-shifts[i]]
)
shapes.append(shape)
result = g.op("Concat", *shapes, axis_i=dims[i])
return result
@symbolic_helper.parse_args("v", "v", "i")
def cross(g, input, other, dim=None):
dim = symbolic_helper._get_dim_for_cross(input, dim)
# If we have two tensors such that
# A = [a, b, c], B = [d, e, f], we permute the tensor such that we have
# After first roll,
# A' = [b, c, a], B' = [f, d, e], so that we calculate (b*f, c*d, a*e)
roll_x_1 = roll(g, input, [2], [dim])
roll_y_1 = roll(g, other, [1], [dim])
# After second roll,
# A' = [c, a, b], B' = [e, f, d], so that we calculate (c*e, a*f, b*d)
roll_x_2 = roll(g, input, [1], [dim])
roll_y_2 = roll(g, other, [2], [dim])
# cross product is calculated as
# result = [(b*f - c*e), (c*d - a*f), (a*e - b*d)]
return sub(g, mul(g, roll_x_1, roll_y_1), mul(g, roll_x_2, roll_y_2))
def cdist(g, x1, x2, p=2.0, compute_mode="use_mm_for_euclid_dist_if_necessary"):
# X1.shape = (B * P * D), X2.shape = (B * R * D)
# In order to respect numpy style broadcasting as demonstrated in
# https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md
# we unsqueeze both input tensors
    # Currently we ignore the 'compute_mode' variable and default to using
    # matrix multiplication to calculate the Euclidean distance
rank = symbolic_helper._get_tensor_rank(x1)
assert rank is not None
broadcasted_x1 = symbolic_helper._unsqueeze_helper(g, x1, [rank - 1])
broadcasted_x2 = symbolic_helper._unsqueeze_helper(g, x2, [rank - 2])
return pairwise_distance(
g, broadcasted_x1, broadcasted_x2, p, eps=1e-06, keepdim=False
)
def lerp(g, self, end, weight):
    # Conditional for better numerical accuracy. This has been discussed in
# https://github.com/pytorch/pytorch/pull/18871
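    # Both branches compute self + weight * (end - self); splitting at weight = 0.5
    # mirrors aten's numerically safer formulation (comment added for clarity).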
diff = g.op("Sub", end, self)
return where(
g,
g.op("Less", weight, g.op("Constant", value_t=torch.tensor(0.5))),
g.op("Add", self, g.op("Mul", weight, diff)),
g.op(
"Sub",
end,
g.op(
"Mul",
diff,
g.op("Sub", g.op("Constant", value_t=torch.tensor(1.0)), weight),
),
),
)
def broadcast_tensors(g, self):
all_tensors = symbolic_helper._unpack_list(self)
t_with_final_shape = zeros_like(g, all_tensors[0])
# Add operator supports multidirectional broadcasting. So we leverage this function
# to infer the final shape generated by the broadcast.
for t in all_tensors:
t_with_final_shape = add(g, t_with_final_shape, t)
t_list = [expand_as(g, t, t_with_final_shape) for t in all_tensors]
return g.op("prim::ListConstruct", *t_list)
class Prim:
domain = "prim"
@staticmethod
def ConstantSplit(g, self, split_size, dim):
size = symbolic_helper._get_tensor_dim_size(self, dim)
if size is None:
return symbolic_helper._unimplemented(
"prim::ConstantSplit", "unknown dimension size"
)
splits = [split_size] * (size // split_size)
leftover = size % split_size
if leftover:
splits.append(leftover)
return g.op("Split", self, split_i=splits, axis_i=dim, outputs=len(splits))
# TODO: It would be better to export this as a chunk directly, as this is
# less sensitive to changes in input size.
# TODO: Once we have proper scoping, stop reimplementing chunk, delete this
# method, and use the desugared version
@staticmethod
def ConstantChunk(g, self, chunks, dim):
dim_size = symbolic_helper._get_tensor_dim_size(self, dim)
if dim_size is None:
return symbolic_helper._unimplemented(
"prim::ConstantChunk", "unknown dimension size"
)
split_size = (dim_size + chunks - 1) // chunks
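        # Illustrative example (added comment): dim_size=10, chunks=3 gives
        # split_size=4 and splits of sizes [4, 4, 2], matching torch.chunk.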
return Prim.ConstantSplit(g, self, split_size, dim)
@staticmethod
def shape(g, self):
return g.op("Shape", self)
@staticmethod
def max(g, self, other):
return op_with_optional_float_cast(g, "Max", self, other, opset_before=12)
@staticmethod
def min(g, self, other=None):
if not other:
if symbolic_helper._is_packed_list(self):
self = stack(g, self, g.op("Constant", value_t=torch.tensor([0])))
return min(g, self)
return min(g, self, other)
@staticmethod
def data(g, self):
return self
@staticmethod
def ListConstruct(g, *inputs, **kwargs):
return None
@staticmethod
def ListUnpack(g, *inputs, **kwargs) -> Optional[List[_C.Value]]:
if len(inputs) == 1 and inputs[0].node().kind() == "prim::ListConstruct":
# Cancel the previous node if it is ListConstruct by returning its inputs
# TODO(justinchuby): Use a public method in the helper module
return symbolic_helper._unpack_list(inputs[0])
return None
@staticmethod
def TupleConstruct(g, *inputs, **kwargs):
return None
@staticmethod
def Uninitialized(g, *inputs, **kwargs):
return None
# exists to refine the type of the Value
# if x is an optional Tensor, unchecked_cast will cast
# x to Tensor, so the rest of the graph knows that x is a Tensor
    # this doesn't do anything at runtime and is a no-op in ONNX
@staticmethod
def unchecked_cast(g, self):
return self
@staticmethod
def dtype(g, self):
scalar_name = symbolic_helper._try_get_scalar_type(self)
if scalar_name is None:
scalar_name = "Float"
scalar_type = _type_utils.JitScalarType.from_name(scalar_name)
# This node records a torch dtype as int
return g.op("Constant", value_t=torch.tensor(scalar_type))
@staticmethod
def tolist(g, input, dim_val, elem_ty_val):
"""tolist is currently supported only for 1D input tensors.
dim_val and elem_ty_val represent dimension and type annotations
that need to match dimension and type of the input tensor.
"""
dim = symbolic_helper._maybe_get_const(dim_val, "i")
if dim > 1:
return symbolic_helper._unimplemented("prim::tolist", "dim_val > 1")
return input
# -----------------------------------------------------------------------------
# Symbolic functions that need extra context
# -----------------------------------------------------------------------------
@staticmethod
def device(ctx: SymbolicContext, g: _C.Graph, *inputs, **kwargs) -> None:
output_type = ctx.cur_node.output().type()
if isinstance(output_type, _C.DeviceObjType):
return None
return symbolic_helper._unimplemented(
"prim::device",
f"output type should be 'DeviceObjType', not '{output_type.kind()}'",
)
@staticmethod
def Loop(ctx: SymbolicContext, g, *inputs, **attrs):
n = ctx.cur_node
env = ctx.env
params_dict = ctx.params_dict
operator_export_type = GLOBALS.operator_export_type
opset_version = GLOBALS.export_onnx_opset_version
new_op_outputs = g.op("Loop", *inputs, outputs=n.outputsSize())
new_node = (
new_op_outputs[0].node() if n.outputsSize() > 1 else new_op_outputs.node()
)
for b in n.blocks():
new_block = new_node.addBlock()
# Copy input metadata to subblock
#
# prim::Loop(iter, cond, input_1, ..., input_n)
# block0(iter, input_1, ..., input_n)
#
# For `Loop` node, copy metadata for `iter`, `input_1`, ..., `input_n`.
for i, b_in in enumerate(b.inputs()):
if i == 0 and i < len(inputs):
b_in.setType(inputs[i].type())
                # For optional block inputs, they may switch between None and not-None inside
# the loop body, so if the loop input is not optional, the block input may
# still need to be optional.
if (
i > 0
and (i + 1) < len(inputs)
and not isinstance(b_in.type(), _C.OptionalType)
):
b_in.setType(inputs[i + 1].type())
torch._C._jit_pass_onnx_block(
b, new_block, operator_export_type, env, False # type:ignore[arg-type]
)
new_op_outputs = torch._C._jit_pass_fixup_onnx_controlflow_node(
new_node, opset_version
)
# Run shape type inference for Loop after subblock is converted.
if GLOBALS.onnx_shape_inference:
torch._C._jit_pass_onnx_node_shape_type_inference(
new_node, params_dict, opset_version
)
return new_op_outputs
@staticmethod
def If(ctx: SymbolicContext, g, *inputs, **attrs):
n = ctx.cur_node
block = ctx.onnx_block
env = ctx.env
params_dict = ctx.params_dict
operator_export_type = GLOBALS.operator_export_type
opset_version = GLOBALS.export_onnx_opset_version
static_if = inputs[0].node().kind() == "onnx::Constant"
if static_if:
# Fold static if
#
# The torch IR
# graph(%embedding_matrix.1 : Float(10, 15, strides=[15, 1], requires_grad=0, device=cpu),
# %input.1 : Long(6, strides=[1], requires_grad=0, device=cpu), ...
# %65 : Bool(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
# %21 : Long(device=cpu) = aten::eq(%20, %64)
# %22 : Long(device=cpu) = prim::If(%21)
# block0():
# %23 : Long(device=cpu) = aten::is_floating_point(%input.1)
# -> (%23)
# block1():
# -> (%65)
# %input.53 : Tensor, %weight : Tensor = prim::If(%22)
# block0():
# -> (%embedding_matrix.1, %input.1)
# block1():
# -> (%input.1, %embedding_matrix.1)
# %26 : int[] = aten::size(%input.53)
#
# The converted ONNX graph
# %10 : Bool(device=cpu) = onnx::Constant[value={0}]()
# %14 : Bool(device=cpu) = onnx::Equal(%13, %8)
# %15 : Bool(requires_grad=0, device=cpu) = onnx::Constant[value={0}]()
# %16 : Long(1, strides=[1], device=cpu) = onnx::Shape(%input.1)
input_flag = symbolic_helper._node_get(inputs[0].node(), "value").tolist()
const_value = (
all(input_flag) if isinstance(input_flag, list) else bool(input_flag)
)
block_idx = 0 if const_value else 1
current_b = list(n.blocks())[block_idx]
env = torch._C._jit_pass_onnx_block(
current_b,
block,
operator_export_type, # type:ignore[arg-type]
env, # type:ignore[arg-type]
True,
)
if_output_list = list(n.outputs())
current_b_list = list(current_b.outputs())
final_b_list = []
for idx in range(len(if_output_list)):
if current_b_list[idx] not in env:
raise RuntimeError(
f"The sub block ATen output {current_b_list[idx]} is not in env."
) # type:ignore[operator]
onnx_b = env[current_b_list[idx]]
final_b_list.append(onnx_b)
return final_b_list
else:
new_op_outputs = g.op("If", *inputs, outputs=n.outputsSize())
new_node = (
new_op_outputs[0].node()
if n.outputsSize() > 1
else new_op_outputs.node()
)
for b in n.blocks():
new_block = new_node.addBlock()
torch._C._jit_pass_onnx_block(
b,
new_block,
operator_export_type, # type:ignore[arg-type]
env,
False,
)
new_op_outputs = torch._C._jit_pass_fixup_onnx_controlflow_node(
new_node, opset_version
)
# Run shape type inference for If after subblock is converted.
if GLOBALS.onnx_shape_inference:
torch._C._jit_pass_onnx_node_shape_type_inference(
new_node, params_dict, opset_version
)
return new_op_outputs
@staticmethod
def Constant(ctx: SymbolicContext, g, *inputs, **attrs):
n = ctx.cur_node
if n.mustBeNone():
return None
# This must go before checking for string values, because some device constants
# have string values, but we want to keep them as unconverted Device types so
# that eq() can work on them.
if isinstance(n.output().type(), _C.DeviceObjType):
return None
if n.kindOf("value") == "t":
return g.op("Constant", value_t=symbolic_helper._node_get(n, "value"))
if n.kindOf("value") == "s":
return g.op("Constant", value_s=symbolic_helper._node_get(n, "value"))
elif n.output().type().isSubtypeOf(
_C.ListType.ofInts()
) or n.output().type().isSubtypeOf(_C.ListType.ofFloats()):
return g.op(
"Constant", value_t=torch.tensor(symbolic_helper._node_get(n, "value"))
)
else:
raise RuntimeError(
f"Unsupported prim::Constant kind: `{n.kindOf('value')}`. Send a bug report."
)
class Onnx:
domain = "onnx"
# -----------------------------------------------------------------------------
# Symbolic functions that need extra context
# -----------------------------------------------------------------------------
@staticmethod
def Placeholder(ctx: SymbolicContext, g, *inputs, **attrs):
n = ctx.cur_node
block = ctx.onnx_block
env = ctx.env
return torch._C._jit_onnx_convert_pattern_from_subblock(block, n, env)
| pytorch-master | torch/onnx/symbolic_opset9.py |
"""Importing this patches torch._C classes to add ONNX conveniences."""
import numbers
import re
from typing import Any, Iterable, Tuple, Union
import torch
from torch import _C
from torch._C import _onnx as _C_onnx
from torch.onnx import _deprecation
from torch.onnx._globals import GLOBALS
# TODO(#78694): Refactor the patching process to make it more transparent to users.
def _graph_op(
g: torch._C.Graph,
opname: str,
*raw_args: torch._C.Value,
outputs: int = 1,
**kwargs,
) -> Union[torch._C.Value, Tuple[torch._C.Value, ...]]:
r"""Creates an ONNX operator "opname", taking "args" as inputs and attributes "kwargs".
The set of operators and the inputs/attributes they take
is documented at https://github.com/onnx/onnx/blob/master/docs/Operators.md
This function is monkey-patched onto Graph.
Args:
g: The Torch graph.
opname: The ONNX operator name, e.g., `Abs` or `Add`. TODO(justinchu): Update examples to correct ones.
raw_args: The inputs to the operator; usually provided
as arguments to the `symbolic` definition.
outputs: The number of outputs this operator returns.
By default an operator is assumed to return a single output.
            If `outputs` is greater than one, this function returns a tuple
            of output `Node`s, representing each output of the ONNX operator
            in positional order.
kwargs: The attributes of the ONNX operator, whose keys are named
according to the following convention: `alpha_f` indicates
the `alpha` attribute with type `f`. The valid type specifiers are
`f` (float), `i` (int), `s` (string) or `t` (Tensor). An attribute
specified with type float accepts either a single float, or a
list of floats (e.g., you would say `dims_i` for a `dims` attribute
that takes a list of integers).
Returns:
The node representing the single output of this operator (see the `outputs`
keyword argument for multi-return nodes).
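    Example (illustrative sketch added for clarity; ``x`` and ``y`` are placeholder
    graph values)::
        out = g.op("Concat", x, y, axis_i=0)  # int attribute -> "_i" suffix
        const = g.op("Constant", value_t=torch.tensor(1.0))  # tensor attribute -> "_t" suffix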
"""
    # Filter out None attributes; this is convenient for the caller, who can pass
    # None attributes and have them simply not appear in the exported node.
kwargs = {k: v for k, v in kwargs.items() if v is not None}
def const_if_tensor(arg):
if arg is None:
return arg
elif isinstance(arg, torch._C.Value):
return arg
else:
return g.op("Constant", value_z=arg) # type: ignore[attr-defined]
args = [const_if_tensor(arg) for arg in raw_args]
n = g.insertNode(_new_node(g, opname, outputs, *args, **kwargs)) # type: ignore[attr-defined]
# Import utils to get _params_dict because it is a global that is accessed by c++ code
from torch.onnx import utils
if GLOBALS.onnx_shape_inference:
torch._C._jit_pass_onnx_node_shape_type_inference(
n, utils._params_dict, GLOBALS.export_onnx_opset_version
)
if outputs == 1:
return n.output()
return tuple(n.outputs())
# Generate an ONNX ATen op node.
def _aten_op(g, operator: str, *args, overload_name: str = "", **kwargs):
kwargs["aten"] = True
return g.op(
"ATen", *args, operator_s=operator, overload_name_s=overload_name, **kwargs
)
def _block_op(b: _C.Block, opname: str, *args, **kwargs):
if "::" in opname:
aten = False
ns_opname = opname
else:
aten = kwargs.pop("aten", False)
ns = "aten" if aten else "onnx"
ns_opname = ns + "::" + opname
n = b.addNode(ns_opname, list(args))
for k, v in sorted(kwargs.items()):
# TODO: enable inplace in aten exporting mode.
if k == "inplace":
continue
_add_attribute(n, k, v, aten=aten)
if len(list(n.outputs())) == 1:
return n.output()
return tuple(o for o in n.outputs())
def _new_node(g: torch._C.Graph, opname: str, outputs, *args, **kwargs):
if "::" in opname:
aten = False
ns_opname = opname
else:
aten = kwargs.pop("aten", False)
ns = "aten" if aten else "onnx"
ns_opname = ns + "::" + opname
n = g.create(ns_opname, args, outputs) # type: ignore[attr-defined]
for k, v in sorted(kwargs.items()):
# TODO: enable inplace in aten exporting mode.
if k == "inplace":
continue
_add_attribute(n, k, v, aten=aten)
return n
_attr_pattern = re.compile("^(.+)_(([ifstgz])|(ty))$")
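# Illustrative examples of keys matched by _attr_pattern (added comment):
#   "alpha_f" -> name "alpha", kind "f" (float);  "dims_i" -> name "dims", kind "i" (int);
#   "value_t" -> name "value", kind "t" (tensor). Lists get an "s" appended to the kind.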
def _is_onnx_list(value):
return (
not isinstance(value, torch._six.string_classes)
and not isinstance(value, torch.Tensor)
and isinstance(value, Iterable)
)
def _scalar(x: torch.Tensor):
"""Convert a scalar tensor into a Python value."""
assert x.numel() == 1
return x[0]
def _is_caffe2_aten_fallback():
return (
GLOBALS.operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
and _C_onnx._CAFFE2_ATEN_FALLBACK
)
def _add_attribute(node: _C.Node, key: str, value: Any, aten: bool):
r"""Initializes the right attribute based on type of value."""
m = _attr_pattern.match(key)
if m is None:
raise IndexError(
f"Invalid attribute specifier '{key}' names "
" must be suffixed with type, e.g. 'dim_i' or 'dims_i'"
)
name, kind = m.group(1), m.group(2)
if _is_onnx_list(value):
kind += "s"
if aten and _is_caffe2_aten_fallback():
if isinstance(value, torch.Tensor):
# Caffe2 proto does not support tensor attribute.
if value.numel() > 1:
raise ValueError("Should not pass tensor attribute")
value = _scalar(value)
if isinstance(value, float):
kind = "f"
else:
kind = "i"
return getattr(node, kind + "_")(name, value)
# TODO(#76254): Remove the deprecated function.
@_deprecation.deprecated(
"1.13", "1.14", "Use 'g.op()' to create a constant node instead."
)
def _graph_constant(
g,
value,
dims,
type_: str,
*args,
**kwargs,
):
"""This helper function can create either constant tensor or constant scalar.
If dims is None or 0 or [0], generate a 0-d tensor (scalar).
"""
assert isinstance(value, numbers.Number)
assert type_ is not None
isscalar = False
if dims is None or dims == 0 or set(dims) == {0}:
dims = [1]
isscalar = True
type_ = type_.lower()
tensor: Union[
torch.CharTensor,
torch.ShortTensor,
torch.IntTensor,
torch.LongTensor,
torch.HalfTensor,
torch.FloatTensor,
torch.DoubleTensor,
]
if type_ == "char":
tensor = torch.CharTensor(*dims)
elif type_ == "short":
tensor = torch.ShortTensor(*dims)
elif type_ == "int":
tensor = torch.IntTensor(*dims)
elif type_ == "long":
tensor = torch.LongTensor(*dims)
elif type_ == "half":
tensor = torch.HalfTensor(*dims)
elif type_ == "float":
tensor = torch.FloatTensor(*dims)
elif type_ == "double":
tensor = torch.DoubleTensor(*dims)
else:
raise ValueError(
"Unknown type, type should be one of the following strings: "
"char, short, int, long, half, float, double"
)
tensor.fill_(value) # type: ignore[call-overload]
if isscalar:
return g.op("Constant", *args, value_z=tensor, **kwargs)
return g.op("Constant", *args, value_t=tensor, **kwargs)
# TODO(#76254): Remove the deprecated function.
@_deprecation.deprecated(
"1.13",
"1.14",
"Internally use '_node_get' in symbolic_helper instead.",
)
def _node_getitem(self, k):
"""Gets attributes of a node which is polymorphic over return type.
This is monkey-patched onto Node.
"""
sel = self.kindOf(k)
return getattr(self, sel)(k)
torch._C.Graph.op = _graph_op # type: ignore[attr-defined]
torch._C.Graph.at = _aten_op # type: ignore[attr-defined]
torch._C.Block.op = _block_op # type: ignore[attr-defined]
torch._C.Graph.constant = _graph_constant # type: ignore[attr-defined]
torch._C.Node.__getitem__ = _node_getitem # type: ignore[attr-defined, misc, assignment]
| pytorch-master | torch/onnx/_patch_torch.py |
"""Utility for deprecating functions."""
import functools
import warnings
def deprecated(since: str, removed_in: str, instructions: str):
"""Marks functions as deprecated.
It will result in a warning when the function is called.
Args:
since: The version when the function was first deprecated.
removed_in: The version when the function will be removed.
instructions: The action users should take.
"""
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
warnings.warn(
f"`{function.__module__}.{function.__name__}` is deprecated in version {since} and will be "
f"removed in version {removed_in}. Please {instructions}.",
category=FutureWarning,
stacklevel=2,
)
return function(*args, **kwargs)
return wrapper
return decorator
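# Illustrative usage (hypothetical names, added for clarity):
#
#     @deprecated(since="1.13", removed_in="1.15", instructions="use `new_fn` instead")
#     def old_fn(*args):
#         return new_fn(*args)
#
# Calling `old_fn` then emits a FutureWarning assembled from these arguments.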
| pytorch-master | torch/onnx/_deprecation.py |
from __future__ import annotations
from typing import Dict
from torch import _C as _C
class ExportTypes:
r"""Specifies how the ONNX model is stored."""
PROTOBUF_FILE = "Saves model in the specified protobuf file."
ZIP_ARCHIVE = "Saves model in the specified ZIP file (uncompressed)."
COMPRESSED_ZIP_ARCHIVE = "Saves model in the specified ZIP file (compressed)."
DIRECTORY = "Saves model in the specified folder."
class SymbolicContext:
r"""Provides extra context for symbolic functions.
Args:
params_dict (Dict[str, _C.IValue]): Mapping from graph initializer name to IValue.
env (Dict[_C.Value, _C.Value]): Mapping from Torch domain graph Value to ONNX domain graph Value.
cur_node (_C.Node): Current node being converted to ONNX domain.
onnx_block (_C.Block): Current ONNX block that converted nodes are being appended to.
"""
def __init__(self, params_dict, env, cur_node, onnx_block):
self.params_dict: Dict[str, _C.IValue] = params_dict
self.env: Dict[_C.Value, _C.Value] = env
# Current node that is being converted.
self.cur_node: _C.Node = cur_node
# Current onnx block that converted nodes are being appended to.
self.onnx_block: _C.Block = onnx_block
| pytorch-master | torch/onnx/_exporter_states.py |
"""ONNX exporter."""
import warnings
from torch import _C
from torch._C import _onnx as _C_onnx
from torch._C._onnx import (
_CAFFE2_ATEN_FALLBACK,
OperatorExportTypes,
TensorProtoDataType,
TrainingMode,
)
from . import ( # usort:skip. Keep the order instead of sorting lexicographically
_deprecation,
errors,
symbolic_caffe2,
symbolic_helper,
symbolic_opset7,
symbolic_opset8,
symbolic_opset9,
symbolic_opset10,
symbolic_opset11,
symbolic_opset12,
symbolic_opset13,
symbolic_opset14,
symbolic_opset15,
symbolic_opset16,
symbolic_registry,
utils,
)
from ._exporter_states import ExportTypes, SymbolicContext
from ._type_utils import JitScalarType
from .errors import CheckerError # Backwards compatibility
from .utils import (
_optimize_graph,
_run_symbolic_function,
_run_symbolic_method,
export,
export_to_pretty_string,
is_in_onnx_export,
register_custom_op_symbolic,
select_model_mode_for_export,
unregister_custom_op_symbolic,
)
__all__ = [
# Modules
"symbolic_helper",
"symbolic_registry",
"utils",
"errors",
# All opsets
"symbolic_caffe2",
"symbolic_opset7",
"symbolic_opset8",
"symbolic_opset9",
"symbolic_opset10",
"symbolic_opset11",
"symbolic_opset12",
"symbolic_opset13",
"symbolic_opset14",
"symbolic_opset15",
"symbolic_opset16",
# Enums
"ExportTypes",
"OperatorExportTypes",
"TrainingMode",
"TensorProtoDataType",
"JitScalarType",
# Classes
"SymbolicContext",
# Public functions
"export",
"export_to_pretty_string",
"is_in_onnx_export",
"select_model_mode_for_export",
"register_custom_op_symbolic",
"unregister_custom_op_symbolic",
"disable_log",
"enable_log",
"is_onnx_log_enabled",
"log",
"set_log_stream",
# Errors
"CheckerError", # Backwards compatibility
]
# Set namespace for exposed private names
ExportTypes.__module__ = "torch.onnx"
SymbolicContext.__module__ = "torch.onnx"
JitScalarType.__module__ = "torch.onnx"
producer_name = "pytorch"
producer_version = _C_onnx.PRODUCER_VERSION
@_deprecation.deprecated(
since="1.12.0", removed_in="TBD", instructions="use `torch.onnx.export` instead"
)
def _export(*args, **kwargs):
return utils._export(*args, **kwargs)
def is_onnx_log_enabled() -> bool:
r"""Returns True iff ONNX logging is turned on."""
return _C._jit_is_onnx_log_enabled()
def enable_log() -> None:
r"""Enables ONNX logging."""
_C._jit_set_onnx_log_enabled(True)
def disable_log() -> None:
r"""Disables ONNX logging."""
_C._jit_set_onnx_log_enabled(False)
def set_log_stream(stream_name: str = "stdout") -> None:
r"""Sets output stream for ONNX logging.
Args:
stream_name (str, default "stdout"): Only 'stdout' and 'stderr' are supported
as ``stream_name``.
"""
_C._jit_set_onnx_log_output_stream(stream_name)
def log(*args) -> None:
r"""A simple logging facility for ONNX exporter.
Args:
args: Arguments are converted to string, concatenated together with a newline
character appended to the end, and flushed to output stream.
"""
_C._jit_onnx_log(*args)
| pytorch-master | torch/onnx/__init__.py |
"""
Note [ONNX operators that are added/updated from opset 8 to opset 9]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
New operators:
Compress
ConstantOfShape
EyeLike
MaxUnpool
OneHot
Sinh
Cosh
Asinh
Acosh
Atanh
Shrink
IsNaN
Sign
Erf
Scatter
Where
NonZero
TfIdfVectorizer
MeanVarianceNormalization
Updated operators:
BatchNormalization: removed spatial attribute.
    Greater, Less, Constant, MatMul, PRelu, Gemm, Flatten: more data types (integers) supported.
    Cast: more data types (string) supported.
Upsample: moved scales from attribute to input.
Scan
"""
import warnings
import torch
from torch.onnx import _type_utils, symbolic_helper, symbolic_opset9 as opset9
block_listed_operators = [
"nonzero",
"where",
"scatter",
"scatter_add",
"erf",
"sign",
"isnan",
"gather",
"arange",
"masked_fill",
"index_fill",
"index_copy",
"repeat_interleave",
"isnan",
"any",
"all",
]
for block_listed_op in block_listed_operators:
vars()[block_listed_op] = symbolic_helper._block_list_in_opset(block_listed_op)
vars()[block_listed_op].__module__ = "torch.onnx.symbolic_opset8"
def _interpolate(name, dim, interpolate_mode):
def symbolic_fn(g, input, output_size, *args):
scales, align_corners = symbolic_helper._get_interpolate_attributes(
g, interpolate_mode, args
)
symbolic_helper._interpolate_warning(interpolate_mode)
align_corners = symbolic_helper._maybe_get_scalar(align_corners)
if align_corners:
return symbolic_helper._unimplemented(name, "align_corners == True")
output_size = symbolic_helper._maybe_get_const(output_size, "is")
if symbolic_helper._is_value(output_size):
return symbolic_helper._unimplemented(
name, "torch._C.Value (output_size) indexing"
)
if scales is None:
scales = [
1.0
if i < 2
else float(output_size[-(dim - i)])
/ float(input.type().sizes()[-(dim - i)])
for i in range(0, dim)
]
return g.op("Upsample", input, mode_s=interpolate_mode, scales_f=scales)
return symbolic_fn
upsample_nearest1d = _interpolate("upsample_nearest1d", 3, "nearest")
upsample_nearest2d = _interpolate("upsample_nearest2d", 4, "nearest")
upsample_nearest3d = _interpolate("upsample_nearest3d", 5, "nearest")
upsample_linear1d = _interpolate("upsample_linear1d", 3, "linear")
upsample_bilinear2d = _interpolate("upsample_bilinear2d", 4, "linear")
upsample_trilinear3d = _interpolate("upsample_trilinear3d", 5, "linear")
def __interpolate(
g, input, size, scale_factor, mode, align_corners, recompute_scale_factor, antialias
):
align_corners = symbolic_helper._maybe_get_const(align_corners, "b")
if not symbolic_helper._is_none(align_corners) and align_corners:
return symbolic_helper._unimplemented("interpolate", "align_corners == True")
if not symbolic_helper._is_none(scale_factor) and symbolic_helper._is_value(
scale_factor
):
return symbolic_helper._unimplemented(
"interpolate", "dynamic scales in opset 8"
)
if not symbolic_helper._is_none(size) and symbolic_helper._is_value(size):
return symbolic_helper._unimplemented("interpolate", "dynamic size in opset 8")
scales, mode = symbolic_helper._interpolate_get_scales_and_mode(
g, input, size, scale_factor, mode, align_corners
)
return g.op("Upsample", input, mode_s=mode, scales_f=scales)
# NOTE: We should create a wrapper for this kind of operation, after resolving the shape/type propagation
# issue for "cast" operators. Some symbolic functions depend on shape information of input tensor, which
# is lost after casting.
def _try_cast_integer_to_float(g, *args):
floating_scalar_types = ["Half", "Float", "Double"]
old_type = None
# Cast the input tensor to Float if its scalarType is known and is not floating number.
# If casting is performed, return the old scalarType, otherwise return None.
arg0_type = args[0].type().scalarType()
if arg0_type is not None:
old_type = arg0_type
if old_type not in floating_scalar_types:
# TODO(justinchuby): Remove the type ignore hint once _cast_Float is
# properly defined.
# NOTE: _cast_Float is generated programmatically so we need to make the
# type checker happy with ignore[attr-defined].
args = tuple(opset9._cast_Float(g, arg, False) for arg in args) # type: ignore[attr-defined]
else:
return (None,) + args
else:
warnings.warn(
"Only floating datatype is supported for these operators: "
"{Greater, Less, MatMul, PRelu, Gemm, Flatten}. This might cause "
"the onnx model to be incorrect, if inputs have integer datatypes."
)
return (old_type,) + args
def _cast_to_type(g, input, to_type):
if to_type is None:
return input
return getattr(opset9, f"_cast_{to_type}")(g, input, False)
def _comparison_operator(g, input, other, op_name):
other = symbolic_helper._maybe_get_scalar(other)
other = symbolic_helper._if_scalar_type_as(g, other, input)
_, input, other = _try_cast_integer_to_float(g, input, other)
return g.op(op_name, input, other)
# NOTE: For symbolics {gt, lt, bmm, matmul, prelu, mm, addmm, view, flatten},
# integer input type not supported in opset8. Cast to float if possible.
def gt(g, input, other):
return _comparison_operator(g, input, other, "Greater")
def lt(g, input, other):
return _comparison_operator(g, input, other, "Less")
def bmm(g, self, other):
if symbolic_helper._try_get_scalar_type(self):
old_type, self, other = _try_cast_integer_to_float(g, self, other)
return _cast_to_type(g, g.op("MatMul", self, other), old_type)
else:
return g.op("MatMul", self, other)
def matmul(g, self, other):
return bmm(g, self, other)
def prelu(g, self, weight):
self_rank = symbolic_helper._get_tensor_rank(self)
weight_sizes = symbolic_helper._get_tensor_sizes(weight)
if self_rank is not None and self_rank > 2:
weight = g.op("Unsqueeze", weight, axes_i=list(range(1, self_rank - 1)))
elif self_rank == 0 and weight_sizes == [1]:
# self and weight are both scalar but weight has rank == 1, squeeze weight.
weight = symbolic_helper._squeeze_helper(g, weight, [0])
if symbolic_helper._try_get_scalar_type(self):
old_type, self, weight = _try_cast_integer_to_float(g, self, weight)
return _cast_to_type(g, g.op("PRelu", self, weight), old_type)
else:
return g.op("PRelu", self, weight)
def mm(g, self, other):
    # Create a dummy C tensor. Only needed for API purposes; its value is ignored
    # since beta = 0
scalar_type = symbolic_helper._try_get_scalar_type(self, other)
if scalar_type is None:
raise ValueError("mm can only operate on tensors with known types")
zero_constant = g.op(
"Constant",
value_t=torch.tensor(
[0], dtype=_type_utils.JitScalarType.from_name(scalar_type).dtype()
),
)
if symbolic_helper._try_get_scalar_type(self):
old_type, self, other, zero_constant = _try_cast_integer_to_float(
g, self, other, zero_constant
)
return _cast_to_type(
g,
g.op("Gemm", self, other, zero_constant, beta_f=0.0, alpha_f=1.0),
old_type,
)
return g.op("Gemm", self, other, zero_constant, beta_f=0.0, alpha_f=1.0)
@symbolic_helper.parse_args("v", "v", "v", "t", "t")
def addmm(g, self, mat1, mat2, beta, alpha):
if symbolic_helper._try_get_scalar_type(self):
old_type, self, mat1, mat2 = _try_cast_integer_to_float(g, self, mat1, mat2)
return _cast_to_type(
g,
g.op(
"Gemm",
mat1,
mat2,
self,
beta_f=symbolic_helper._scalar(beta),
alpha_f=symbolic_helper._scalar(alpha),
),
old_type,
)
else:
return g.op(
"Gemm",
mat1,
mat2,
self,
beta_f=symbolic_helper._scalar(beta),
alpha_f=symbolic_helper._scalar(alpha),
)
def flatten(g, input, start_dim, end_dim):
start_dim_i = symbolic_helper._get_const(start_dim, "i", "start_dim")
end_dim_i = symbolic_helper._get_const(end_dim, "i", "end_dim")
dim = input.type().dim()
if end_dim_i < 0:
end_dim_i = dim + end_dim_i
# use ONNX's Flatten operator for cases where the output shape is 2D
if start_dim_i == 1 and end_dim_i == dim - 1:
if symbolic_helper._try_get_scalar_type(input):
old_type, input = _try_cast_integer_to_float(g, input)
return _cast_to_type(
g, g.op("Flatten", input, axis_i=start_dim_i), old_type
)
else:
return g.op("Flatten", input, axis_i=start_dim_i)
if start_dim_i == 0 and end_dim_i == dim - 2:
if symbolic_helper._try_get_scalar_type(input):
old_type, input = _try_cast_integer_to_float(g, input)
return _cast_to_type(
g, g.op("Flatten", input, axis_i=end_dim_i + 1), old_type
)
else:
return g.op("Flatten", input, axis_i=end_dim_i + 1)
return opset9.flatten(g, input, start_dim, end_dim)
def _constant_fill(g, sizes, dtype: int, const_value):
if dtype is None:
scalar_type = _type_utils.JitScalarType.FLOAT
else:
scalar_type = _type_utils.JitScalarType(dtype)
if not scalar_type.dtype().is_floating_point:
result = g.op(
"ConstantFill",
sizes,
dtype_i=_type_utils.JitScalarType.FLOAT.onnx_type(),
input_as_shape_i=1,
value_f=const_value,
)
return g.op("Cast", result, to_i=scalar_type.onnx_type())
else:
return g.op(
"ConstantFill",
sizes,
dtype_i=scalar_type.onnx_type(),
input_as_shape_i=1,
value_f=const_value,
)
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
def empty(g, sizes, dtype, layout, device, pin_memory=False, memory_format=None):
return zeros(g, sizes, dtype, layout, device, pin_memory)
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
def empty_like(g, input, dtype, layout, device, pin_memory=False, memory_format=None):
return zeros_like(g, input, dtype, layout, device, pin_memory)
@symbolic_helper.parse_args("v", "i", "v", "v", "v")
def zeros(g, sizes, dtype, layout, device, pin_memory=False):
# NOTE: no way to set device and layout in ONNX, so we ignore it
return _constant_fill(g, sizes, dtype, 0)
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
def zeros_like(g, input, dtype, layout, device, pin_memory=False, memory_format=None):
shape = g.op("Shape", input)
return _constant_fill(g, shape, dtype, 0)
@symbolic_helper.parse_args("v", "i", "v", "v", "v")
def ones(g, sizes, dtype, layout, device, pin_memory=False):
return _constant_fill(g, sizes, dtype, 1)
@symbolic_helper.parse_args("v", "i", "v", "v", "v", "v")
def ones_like(g, input, dtype, layout, device, pin_memory=False, memory_format=None):
shape = g.op("Shape", input)
return _constant_fill(g, shape, dtype, 1)
def full(g, sizes, value, dtype, layout, device, pin_memory=False):
const_value = symbolic_helper._maybe_get_const(value, "t")
if symbolic_helper._is_value(const_value):
tmp = zeros(g, sizes, dtype, layout, device)
return opset9.add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1)))
else:
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
return _constant_fill(g, sizes, dtype, const_value)
@symbolic_helper.parse_args("v", "f", "i", "v", "v", "v", "v")
def full_like(
g, input, fill_value, dtype, layout, device, pin_memory=False, memory_format=None
):
shape = g.op("Shape", input)
return _constant_fill(g, shape, dtype, fill_value)
def repeat(g, self, repeats):
if not symbolic_helper._is_value(repeats):
repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
if symbolic_helper._is_packed_list(repeats):
repeat_size_len = len(symbolic_helper._unpack_list(repeats))
else:
const_repeats = symbolic_helper._maybe_get_const(repeats, "is")
repeat_size_len = len(const_repeats)
if self.isCompleteTensor():
sizes = self.type().sizes()
diff_dims = repeat_size_len - len(sizes)
if diff_dims > 0:
self = opset9.view(
g, self, g.op("Constant", value_t=torch.tensor([1] * diff_dims + sizes))
)
return g.op("Tile", self, repeats)
| pytorch-master | torch/onnx/symbolic_opset8.py |
"""Functions to export models into the ONNX IR format.
These models can be loaded with the ONNX library and then
converted to models which run on other deep learning frameworks.
"""
from __future__ import annotations
import contextlib
import copy
import inspect
import io
import itertools
import os
import re
import textwrap
import typing
import warnings
import zipfile
from typing import (
Any,
Callable,
cast,
Collection,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import torch
import torch._C._onnx as _C_onnx
import torch.jit._trace
import torch.serialization
from torch import _C
from torch.onnx import ( # noqa: F401
_constants,
_exporter_states,
_patch_torch,
errors,
symbolic_caffe2,
symbolic_helper,
symbolic_registry,
)
from torch.onnx._globals import GLOBALS
__all__ = [
"is_in_onnx_export",
"select_model_mode_for_export",
"disable_apex_o2_state_dict_hook",
"setup_onnx_logging",
"exporter_context",
"export",
"warn_on_static_input_change",
"unpack_quantized_tensor",
"export_to_pretty_string",
"unconvertible_ops",
"get_ns_op_name_from_custom_op",
"register_custom_op_symbolic",
"unregister_custom_op_symbolic",
]
def is_in_onnx_export() -> bool:
"""Returns whether it is in the middle of ONNX export."""
return GLOBALS.in_onnx_export
# TODO(justinchuby): Remove dependency to this global variable from constant_fold.cpp
# Skip check due to cannot import IValue from torch._C
_params_dict = {} # type: ignore[var-annotated]
@contextlib.contextmanager
def select_model_mode_for_export(model, mode: _C_onnx.TrainingMode):
r"""A context manager to temporarily set the training mode of ``model``
to ``mode``, resetting it when we exit the with-block.
Args:
model: Same type and meaning as ``model`` arg to :func:`export`.
mode: Same type and meaning as ``training`` arg to :func:`export`.
"""
if not isinstance(mode, _C_onnx.TrainingMode):
raise TypeError(
f"'mode' should be a torch.onnx.TrainingMode enum, but got '{type(mode)}'."
)
originally_training: bool = False
if not isinstance(model, torch.jit.ScriptFunction):
originally_training = model.training
# ONNX opset 12 has better support for training amenable models, with updated
# versions of the dropout and batch_norm operators
if mode == _C_onnx.TrainingMode.TRAINING or (
mode == _C_onnx.TrainingMode.PRESERVE and originally_training
):
GLOBALS.export_training = True
if GLOBALS.export_onnx_opset_version < 12:
warnings.warn(
"You are exporting the model in training mode with onnx opset "
f"version {GLOBALS.export_onnx_opset_version}. "
"Opset versions lower than opset 12 will not be able to export "
"nodes such as Dropout and BatchNorm correctly."
)
else:
GLOBALS.export_training = False
GLOBALS.training_mode = mode
if mode == _C_onnx.TrainingMode.TRAINING:
model.train(True)
elif mode == _C_onnx.TrainingMode.EVAL:
model.train(False)
# else mode == _C_onnx.TrainingMode.PRESERVE, do nothing
try:
yield
finally:
if not (
isinstance(model, torch.jit.ScriptFunction)
or mode == _C_onnx.TrainingMode.PRESERVE
):
model.train(originally_training)
@contextlib.contextmanager
def disable_apex_o2_state_dict_hook(
model: Union[torch.nn.Module, torch.jit.ScriptFunction]
):
# Apex O2 hook state_dict to return fp16 weights as fp32.
# Exporter cannot identify them as same tensors.
# Since this hook is only used by optimizer, it is safe to
# remove this hook while exporting.
if not isinstance(model, torch.jit.ScriptFunction):
model_hooks = {} # type: ignore[var-annotated]
for module in model.modules():
for key, hook in module._state_dict_hooks.items():
if type(hook).__name__ == "O2StateDictHook":
if module not in model_hooks:
model_hooks[module] = {}
model_hooks[module][key] = hook
if module in model_hooks:
for key in model_hooks[module]:
module._state_dict_hooks.pop(key)
try:
yield
finally:
# Add the hooks back
for module, m_map in model_hooks.items():
for key, hook in m_map.items():
module._state_dict_hooks[key] = hook
else:
try:
yield
finally:
pass
@contextlib.contextmanager
def setup_onnx_logging(verbose):
is_originally_enabled = torch.onnx.is_onnx_log_enabled()
if is_originally_enabled or verbose:
torch.onnx.enable_log()
try:
yield
finally:
if not is_originally_enabled:
torch.onnx.disable_log()
@contextlib.contextmanager
def exporter_context(model, mode, verbose):
with select_model_mode_for_export(
model, mode
) as mode_ctx, disable_apex_o2_state_dict_hook(
model
) as apex_ctx, setup_onnx_logging(
verbose
) as log_ctx:
yield (mode_ctx, apex_ctx, log_ctx)
def export(
model: Union[torch.nn.Module, torch.jit.ScriptModule, torch.jit.ScriptFunction],
args: Union[Tuple[Any, ...], torch.Tensor],
f: Union[str, io.BytesIO],
export_params: bool = True,
verbose: bool = False,
training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL,
input_names: Optional[Sequence[str]] = None,
output_names: Optional[Sequence[str]] = None,
operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX,
opset_version: Optional[int] = None,
do_constant_folding: bool = True,
dynamic_axes: Optional[
Union[Mapping[str, Mapping[int, str]], Mapping[str, Sequence[int]]]
] = None,
keep_initializers_as_inputs: Optional[bool] = None,
custom_opsets: Optional[Mapping[str, int]] = None,
export_modules_as_functions: Union[bool, Collection[Type[torch.nn.Module]]] = False,
) -> None:
r"""Exports a model into ONNX format.
If ``model`` is not a :class:`torch.jit.ScriptModule` nor a
:class:`torch.jit.ScriptFunction`, this runs
``model`` once in order to convert it to a TorchScript graph to be exported
(the equivalent of :func:`torch.jit.trace`). Thus this has the same limited support
for dynamic control flow as :func:`torch.jit.trace`.
Args:
model (torch.nn.Module, torch.jit.ScriptModule or torch.jit.ScriptFunction):
the model to be exported.
args (tuple or torch.Tensor):
args can be structured either as:
1. ONLY A TUPLE OF ARGUMENTS::
args = (x, y, z)
The tuple should contain model inputs such that ``model(*args)`` is a valid
invocation of the model. Any non-Tensor arguments will be hard-coded into the
exported model; any Tensor arguments will become inputs of the exported model,
in the order they occur in the tuple.
2. A TENSOR::
args = torch.Tensor([1])
This is equivalent to a 1-ary tuple of that Tensor.
3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS::
args = (x,
{'y': input_y,
'z': input_z})
All but the last element of the tuple will be passed as non-keyword arguments,
and named arguments will be set from the last element. If a named argument is
not present in the dictionary, it is assigned the default value, or None if a
default value is not provided.
.. note::
If a dictionary is the last element of the args tuple, it will be
interpreted as containing named arguments. In order to pass a dict as the
last non-keyword arg, provide an empty dict as the last element of the args
tuple. For example, instead of::
torch.onnx.export(
model,
(x,
# WRONG: will be interpreted as named arguments
{y: z}),
"test.onnx.pb")
Write::
torch.onnx.export(
model,
(x,
{y: z},
{}),
"test.onnx.pb")
f: a file-like object (such that ``f.fileno()`` returns a file descriptor)
or a string containing a file name. A binary protocol buffer will be written
to this file.
export_params (bool, default True): if True, all parameters will
be exported. Set this to False if you want to export an untrained model.
In this case, the exported model will first take all of its parameters
as arguments, with the ordering as specified by ``model.state_dict().values()``
verbose (bool, default False): if True, prints a description of the
model being exported to stdout. In addition, the final ONNX graph will include the
field ``doc_string`` from the exported model which mentions the source code locations
for ``model``. If True, ONNX exporter logging will be turned on.
training (enum, default TrainingMode.EVAL):
* ``TrainingMode.EVAL``: export the model in inference mode.
* ``TrainingMode.PRESERVE``: export the model in inference mode if model.training is
False and in training mode if model.training is True.
* ``TrainingMode.TRAINING``: export the model in training mode. Disables optimizations
which might interfere with training.
input_names (list of str, default empty list): names to assign to the
input nodes of the graph, in order.
output_names (list of str, default empty list): names to assign to the
output nodes of the graph, in order.
operator_export_type (enum, default OperatorExportTypes.ONNX):
* ``OperatorExportTypes.ONNX``: Export all ops as regular ONNX ops
(in the default opset domain).
* ``OperatorExportTypes.ONNX_FALLTHROUGH``: Try to convert all ops
to standard ONNX ops in the default opset domain. If unable to do so
(e.g. because support has not been added to convert a particular torch op to ONNX),
fall back to exporting the op into a custom opset domain without conversion. Applies
to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_
as well as ATen ops. For the exported model to be usable, the runtime must support
these non-standard ops.
* ``OperatorExportTypes.ONNX_ATEN``: All ATen ops (in the TorchScript namespace "aten")
are exported as ATen ops (in opset domain "org.pytorch.aten").
`ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so
this instructs the runtime to use PyTorch's implementation of these ops.
.. warning::
Models exported this way are probably runnable only by Caffe2.
This may be useful if the numeric differences in implementations of operators are
causing large differences in behavior between PyTorch and Caffe2 (which is more
common on untrained models).
* ``OperatorExportTypes.ONNX_ATEN_FALLBACK``: Try to export each ATen op
(in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so
(e.g. because support has not been added to convert a particular torch op to ONNX),
fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for
context.
For example::
graph(%0 : Float):
%3 : int = prim::Constant[value=0]()
# conversion unsupported
%4 : Float = aten::triu(%0, %3)
# conversion supported
%5 : Float = aten::mul(%4, %0)
return (%5)
Assuming ``aten::triu`` is not supported in ONNX, this will be exported as::
graph(%0 : Float):
%1 : Long() = onnx::Constant[value={0}]()
# not converted
%2 : Float = aten::ATen[operator="triu"](%0, %1)
# converted
%3 : Float = onnx::Mul(%2, %0)
return (%3)
If PyTorch was built with Caffe2 (i.e. with ``BUILD_CAFFE2=1``), then
Caffe2-specific behavior will be enabled, including special support
for ops produced by the modules described in
`Quantization <https://pytorch.org/docs/stable/quantization.html>`_.
.. warning::
Models exported this way are probably runnable only by Caffe2.
opset_version (int, default 13): The version of the
`default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md>`_
to target. Must be >= 7 and <= 16.
do_constant_folding (bool, default True): Apply the constant-folding optimization.
Constant-folding will replace some of the ops that have all constant inputs
with pre-computed constant nodes.
dynamic_axes (dict<string, dict<int, string>> or dict<string, list(int)>, default empty dict):
By default the exported model will have the shapes of all input and output tensors
set to exactly match those given in ``args``. To specify axes of tensors as
dynamic (i.e. known only at run-time), set ``dynamic_axes`` to a dict with schema:
* KEY (str): an input or output name. Each name must also be provided in ``input_names`` or
``output_names``.
* VALUE (dict or list): If a dict, keys are axis indices and values are axis names. If a
list, each element is an axis index.
For example::
class SumModule(torch.nn.Module):
def forward(self, x):
return torch.sum(x, dim=1)
torch.onnx.export(SumModule(), (torch.ones(2, 2),), "onnx.pb",
input_names=["x"], output_names=["sum"])
Produces::
input {
name: "x"
...
shape {
dim {
dim_value: 2 # axis 0
}
dim {
dim_value: 2 # axis 1
...
output {
name: "sum"
...
shape {
dim {
dim_value: 2 # axis 0
...
While::
torch.onnx.export(SumModule(), (torch.ones(2, 2),), "onnx.pb",
input_names=["x"], output_names=["sum"],
dynamic_axes={
# dict value: manually named axes
"x": {0: "my_custom_axis_name"},
# list value: automatic names
"sum": [0],
})
Produces::
input {
name: "x"
...
shape {
dim {
dim_param: "my_custom_axis_name" # axis 0
}
dim {
dim_value: 2 # axis 1
...
output {
name: "sum"
...
shape {
dim {
dim_param: "sum_dynamic_axes_1" # axis 0
...
keep_initializers_as_inputs (bool, default None): If True, all the
initializers (typically corresponding to parameters) in the
exported graph will also be added as inputs to the graph. If False,
then initializers are not added as inputs to the graph, and only
the non-parameter inputs are added as inputs.
This may allow for better optimizations (e.g. constant folding) by
backends/runtimes.
If ``opset_version < 9``, initializers MUST be part of graph
inputs and this argument will be ignored and the behavior will be
equivalent to setting this argument to True.
If None, then the behavior is chosen automatically as follows:
* If ``operator_export_type=OperatorExportTypes.ONNX``, the behavior is equivalent
to setting this argument to False.
* Else, the behavior is equivalent to setting this argument to True.
custom_opsets (dict<str, int>, default empty dict): A dict with schema:
* KEY (str): opset domain name
* VALUE (int): opset version
If a custom opset is referenced by ``model`` but not mentioned in this dictionary,
the opset version is set to 1. Only custom opset domain name and version should be
indicated through this argument.
export_modules_as_functions (bool or set of type of nn.Module, default False): Flag to enable
exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the
particular types of modules to export as local functions in ONNX.
This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because
``opset_version`` < 15 implies IR version < 8, which means no local function support.
Module variables will be exported as function attributes. There are two categories of function
attributes.
1. Annotated attributes: class variables that have type annotations via
`PEP 526-style <https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations>`_
will be exported as attributes.
Annotated attributes are not used inside the subgraph of ONNX local function because
they are not created by PyTorch JIT tracing, but they may be used by consumers
to determine whether or not to replace the function with a particular fused kernel.
2. Inferred attributes: variables that are used by operators inside the module. Attribute names
will have prefix "inferred::". This is to differentiate from predefined attributes retrieved from
python module annotations. Inferred attributes are used inside the subgraph of ONNX local function.
* ``False``(default): export ``nn.Module`` forward calls as fine grained nodes.
* ``True``: export all ``nn.Module`` forward calls as local function nodes.
* Set of type of nn.Module: export ``nn.Module`` forward calls as local function nodes,
only if the type of the ``nn.Module`` is found in the set.
Raises:
CheckerError: If the ONNX checker detects an invalid ONNX graph. Will still export the
model to the file ``f`` even if this is raised.
"""
_export(
model,
args,
f,
export_params,
verbose,
training,
input_names,
output_names,
operator_export_type=operator_export_type,
opset_version=opset_version,
do_constant_folding=do_constant_folding,
dynamic_axes=dynamic_axes,
keep_initializers_as_inputs=keep_initializers_as_inputs,
custom_opsets=custom_opsets,
export_modules_as_functions=export_modules_as_functions,
)
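# Illustrative sketch (not part of the original module): a minimal end-to-end call of
# ``export`` using the conventions documented in its docstring. The tiny model, the file
# name "example.onnx", and the axis name "batch" are hypothetical placeholders.
def _example_export_usage():  # pragma: no cover
    class _TinyModel(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    dummy_input = torch.randn(4, 3)
    # Axis 0 is marked dynamic so the exported graph accepts any batch size.
    export(
        _TinyModel(),
        (dummy_input,),
        "example.onnx",
        input_names=["x"],
        output_names=["y"],
        dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}},
    )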
def _is_constant_tensor_list(node):
if node.kind() != "prim::Constant":
return False
output_type = node.output().type()
if output_type.isSubtypeOf(_C.ListType.ofTensors()):
return True
if output_type.isSubtypeOf(_C.ListType(_C.OptionalType.ofTensor())):
return True
# ONNX can't handle constants that are lists of tensors, which can
# get generated in constant prop. So we split them back into prim::ListConstructs
def _split_tensor_list_constants(g, block):
for node in block.nodes():
for subblock in node.blocks():
_split_tensor_list_constants(g, subblock)
if _is_constant_tensor_list(node):
inputs = []
for val in node.output().toIValue():
input = g.insertConstant(val)
input.node().moveBefore(node)
input.node().copyMetadata(node)
inputs.append(input)
lc = (
g.create("prim::ListConstruct", inputs)
.insertBefore(node)
.output()
.setType(_C.ListType.ofTensors())
)
lc.node().copyMetadata(node)
node.output().replaceAllUsesWith(lc)
def _optimize_graph(
graph: _C.Graph,
operator_export_type: _C_onnx.OperatorExportTypes,
_disable_torch_constant_prop: bool = False,
fixed_batch_size: bool = False,
params_dict=None,
dynamic_axes=None,
input_names=None,
module=None,
):
# Inline everything
_C._jit_pass_inline(graph)
# Remove fork/wait nodes
_C._jit_pass_inline_fork_wait(graph)
_C._jit_pass_lint(graph)
_C._jit_pass_onnx_autograd_function_process(graph)
_C._jit_pass_lower_all_tuples(graph)
# we now record some ops like ones/zeros
# into a trace where we previously recorded constants.
# use constant prop to maintain our current level of onnx support
# without implementing symbolics for all of them
if _disable_torch_constant_prop is False:
_C._jit_pass_constant_propagation(graph)
_split_tensor_list_constants(graph, graph)
# run dce to eliminate dead parts of the graph that might have been
# left behind by things like symbolic_override
_C._jit_pass_dce(graph)
_C._jit_pass_lint(graph)
_C._jit_pass_canonicalize_graph_fuser_ops(graph)
_C._jit_pass_lint(graph)
_C._jit_pass_peephole(graph, True)
_C._jit_pass_fuse_addmm(graph)
_C._jit_pass_lint(graph)
_C._jit_pass_peephole(graph, True)
_C._jit_pass_lower_all_tuples(graph)
# in _jit_pass_onnx, symbolic functions are called for each node for conversion.
# However, there are nodes that cannot be converted without additional context.
# For example, the number of outputs from split (and whether it is static or dynamic) is unknown
# until the point where it is unpacked by listUnpack node.
# This pass does a preprocess, and prepares the nodes such that enough context can be received
# by the symbolic function.
_C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph, module)
_C._jit_pass_onnx_preprocess(graph)
# onnx does not support tuples, so try to remove them
_C._jit_pass_lint(graph)
# onnx only supports tensors, but 1 / 2 = 0.5 and tensor(1) / tensor(2) = 0
_C._jit_pass_prepare_division_for_onnx(graph)
_C._jit_pass_onnx_remove_print(graph)
_C._jit_pass_onnx_preprocess_caffe2(graph)
symbolic_helper._quantized_ops.clear()
# Unpack quantized weights for conv and linear ops and insert into graph.
_C._jit_pass_onnx_unpack_quantized_weights(
graph, params_dict, symbolic_helper.is_caffe2_aten_fallback()
)
if symbolic_helper.is_caffe2_aten_fallback():
# Insert permutes before and after each conv op to ensure correct order.
_C._jit_pass_onnx_quantization_insert_permutes(graph, params_dict)
# Find consecutive permutes that are no-ops and remove them.
_C._jit_pass_custom_pattern_based_rewrite_graph(
textwrap.dedent(
"""\
graph(%Pi):
%Pq = quantized::nhwc2nchw(%Pi)
%Pr = quantized::nchw2nhwc(%Pq)
return (%Pr)"""
),
textwrap.dedent(
"""\
graph(%Ri):
return (%Ri)"""
),
graph,
)
# onnx only supports tensors, so we turn all out number types into tensors
_C._jit_pass_erase_number_types(graph)
if GLOBALS.onnx_shape_inference:
input_names = [] if input_names is None else input_names
dynamic_axes = {} if dynamic_axes is None else dynamic_axes
_C._jit_pass_onnx_set_dynamic_input_shape(graph, dynamic_axes, input_names)
_C._jit_pass_onnx_lint(graph)
graph = _C._jit_pass_onnx(graph, operator_export_type)
_C._jit_pass_onnx_lint(graph)
_C._jit_pass_lint(graph)
_C._jit_pass_onnx_scalar_type_analysis(
graph, True, GLOBALS.export_onnx_opset_version
)
_C._jit_pass_lint(graph)
_C._jit_pass_onnx_peephole(
graph, GLOBALS.export_onnx_opset_version, fixed_batch_size
)
_C._jit_pass_lint(graph)
# graph is not a valid jit graph anymore because types have been replaced
# (e.g. int with Tensor), so it now contains operators that don't actually
# exist. We can't run normal dead code elimination because it'd fail trying
# to look up if an operator has side effects, but we can run a dead code
# elimination variant that doesn't need to look up if an op has side effects.
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
_C._jit_pass_lint(graph)
graph = _C._jit_pass_canonicalize(graph)
_C._jit_pass_lint(graph)
if GLOBALS.onnx_shape_inference:
_C._jit_pass_onnx_graph_shape_type_inference(
graph, params_dict, GLOBALS.export_onnx_opset_version
)
return graph
def warn_on_static_input_change(input_states):
"""Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph.
We accept dictionaries and strings as ONNX inputs, but they should only be used for
configuration. We detect here if these inputs are modified, and if so we warn
the user that the changes won't take effect in the traced ONNX graph.
"""
for input, traced_input in zip(input_states[0], input_states[1]):
if isinstance(input, dict):
if list(input.keys()) != list(traced_input.keys()):
warning = (
"We detected that you are modifying a dictionary that is an input to your "
"model. "
"Note that dictionaries are allowed as inputs in ONNX but they should be "
"handled with care. "
"Usages of dictionaries is not recommended, and should not be used except "
"for configuration use. "
"Also note that the order and values of the keys must remain the same. "
)
warnings.warn(warning)
elif isinstance(input, str):
if input != traced_input:
warning = (
"The model seems to have string inputs/outputs. "
"Note that strings will not appear as inputs/outputs of the ONNX graph. "
)
warnings.warn(warning)
def _resolve_args_by_export_type(arg_name, arg_value, operator_export_type):
"""Resolves the arguments that are ignored when export_type != operator_export_type.ONNX."""
if (
operator_export_type is not operator_export_type.ONNX
and _C_onnx._CAFFE2_ATEN_FALLBACK
):
if arg_value is True:
warnings.warn(
f"'{arg_name}' can be set to True only when 'operator_export_type' is "
"`ONNX`. Since 'operator_export_type' is not set to 'ONNX', "
f"'{arg_name}' argument will be ignored."
)
arg_value = False
return arg_value
def _decide_keep_init_as_input(
keep_initializers_as_inputs: Optional[bool],
operator_export_type: _C_onnx.OperatorExportTypes,
opset_version: int,
):
"""Decides whether the initializers in the graph should be listed as ONNX graph inputs.
This method encapsulates the logic to decide whether the initializers in the graph
should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4).
If keep_initializers_as_inputs is not specified (None), then we decide whether to keep
initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type
is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other
export types keep initializers as input (val_keep_init_as_ip=True).
If keep_initializers_as_inputs is specified, then respect it. Unless opset version <= 8,
in which case it must be ignored because for opset version <= 8, all initializers MUST be
part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True.
Special handling is needed for opset version 8 or lower, because irrespective
of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3
semantics, i.e. all initializers must be listed as ONNX graph input.
"""
if opset_version < 9:
if keep_initializers_as_inputs is False:
warnings.warn(
"Setting 'keep_initializers_as_inputs=False' for opset version"
"8 or lower would lead to an invalid ONNX graph. Therefore, "
"'keep_initializers_as_inputs=False' is ignored during export."
"Exported model will have initializers as graph inputs (compliant "
" to ONNX IR v3)."
)
return True # i.e. True == initializers are part of graph input (ONNX IR v3)
val_keep_init_as_ip = (
True if keep_initializers_as_inputs is None else keep_initializers_as_inputs
)
if (
keep_initializers_as_inputs is None
and operator_export_type is _C_onnx.OperatorExportTypes.ONNX
):
val_keep_init_as_ip = False
return val_keep_init_as_ip
def _decide_add_node_names(add_node_names, operator_export_type):
return _resolve_args_by_export_type(
"add_node_names", add_node_names, operator_export_type
)
def _decide_constant_folding(do_constant_folding, operator_export_type, training):
do_constant_folding = _resolve_args_by_export_type(
"do_constant_folding", do_constant_folding, operator_export_type
)
if do_constant_folding and (
training is not None and training is not _C_onnx.TrainingMode.EVAL
):
warnings.warn(
"It is recommended that constant folding be turned off ('do_constant_folding=False') "
"when exporting the model in training-amenable mode, i.e. with 'training=TrainingMode.TRAIN' "
"or 'training=TrainingMode.PRESERVE' (when model is in training mode). Otherwise, some "
"learnable model parameters may not translate correctly in the exported ONNX model "
"because constant folding mutates model parameters. Please consider "
"turning off constant folding or setting the training=TrainingMode.EVAL."
)
return do_constant_folding
def _signature(model) -> inspect.Signature:
should_be_callable = getattr(model, "forward", model)
if callable(should_be_callable):
return inspect.signature(should_be_callable)
raise ValueError("model has no forward method and is not callable")
def _decide_input_format(model, args):
try:
sig = _signature(model)
except ValueError as e:
warnings.warn(f"{e}, skipping _decide_input_format")
return args
try:
ordered_list_keys = list(sig.parameters.keys())
if ordered_list_keys[0] == "self":
ordered_list_keys = ordered_list_keys[1:]
args_dict: Dict = {}
if isinstance(args, list):
args_list = args
elif isinstance(args, tuple):
args_list = list(args)
else:
args_list = [args]
if isinstance(args_list[-1], dict):
args_dict = args_list[-1]
args_list = args_list[:-1]
n_nonkeyword = len(args_list)
for optional_arg in ordered_list_keys[n_nonkeyword:]:
if optional_arg in args_dict:
args_list.append(args_dict[optional_arg])
# Check if this arg has a default value
else:
param = sig.parameters[optional_arg]
if param.default != param.empty:
args_list.append(param.default)
args = args_list if isinstance(args, list) else tuple(args_list)
# Cases of models with no input args
except IndexError:
warnings.warn("No input args, skipping _decide_input_format")
except Exception as e:
warnings.warn(f"Skipping _decide_input_format\n {e.args[0]}")
return args
def _trace(func, args, operator_export_type, return_outs=False):
# Special case for common case of passing a single Tensor
if isinstance(args, torch.Tensor):
args = (args,)
trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(
func, args, strict=False, _force_outplace=False, _return_inputs_states=True
)
warn_on_static_input_change(inputs_states)
trace_graph = _optimize_graph(trace_graph, operator_export_type, params_dict={})
if return_outs:
return trace_graph, torch_out
return trace_graph
def _trace_and_get_graph_from_model(model, args):
# A basic sanity check: make sure the state_dict keys are the same
# before and after running the model. Fail fast!
orig_state_dict_keys = torch.jit._unique_state_dict(model).keys()
trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(
model, args, strict=False, _force_outplace=False, _return_inputs_states=True
)
warn_on_static_input_change(inputs_states)
if orig_state_dict_keys != torch.jit._unique_state_dict(model).keys():
raise RuntimeError(
"state_dict changed after running the tracer; "
"something weird is happening in your model!"
)
return trace_graph, torch_out
def _get_param_count_list(method_graph, args_params):
param_count_list = []
for input_, arg_params_ in zip(method_graph.inputs(), args_params):
if "PackedParams" in str(input_.type()):
in_vars, _ = torch.jit._flatten(arg_params_)
param_count_list.append(len(in_vars))
else:
param_count_list.append(arg_params_ is not None)
return param_count_list
def _check_flatten_did_not_remove(original, jit_flattened):
"""torch.jit._flatten removes None. Check if it did so in this case."""
def flatten(x):
if isinstance(x, (list, tuple)):
for inner in x:
yield from flatten(inner)
elif isinstance(x, dict):
for inner in x.values():
yield from flatten(inner)
else:
yield x
flattened_with_none = list(flatten(original))
num_none = len(flattened_with_none) - len(jit_flattened)
assert num_none >= 0
if num_none:
raise ValueError(
f"args contained {num_none} None's after flattening. "
"When exporting a ScriptModule or ScriptFunction, no args may "
"be None because that breaks type propagation."
)
def _create_jit_graph(
model: Union[torch.nn.Module, torch.jit.ScriptFunction], args: Sequence[Any]
) -> Tuple[
_C.Graph,
List[_C.IValue],
Optional[Any],
Optional[Union[_C.ScriptModule, _C.ScriptFunction]],
]:
if isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)):
flattened_args = tuple(torch.jit._flatten(tuple(args))[0])
_check_flatten_did_not_remove(args, flattened_args)
torch_out = None
if isinstance(model, torch.jit.ScriptModule):
try:
graph = model.forward.graph
except AttributeError as e:
raise RuntimeError("'forward' method must be a script method") from e
_C._jit_pass_onnx_function_substitution(graph)
freezed_module = _C._freeze_module(
cast(_C.ScriptModule, model._c), preserveParameters=True
)
module, params = _C._jit_onnx_list_model_parameters(freezed_module)
method_graph = module._get_method("forward").graph
args_params = tuple(args) + tuple(params)
param_count_list = _get_param_count_list(method_graph, args_params)
in_vars, _ = torch.jit._flatten(args_params)
graph = _C._propagate_and_assign_input_shapes(
method_graph, tuple(in_vars), param_count_list, False, False
)
return graph, params, torch_out, module
# torch.jit.ScriptFunction
params = []
graph = model.graph
_C._jit_pass_onnx_function_substitution(graph)
param_count_list = _get_param_count_list(graph, args)
graph = _C._propagate_and_assign_input_shapes(
graph, flattened_args, param_count_list, False, False
)
return graph, params, torch_out, None
graph, torch_out = _trace_and_get_graph_from_model(model, args)
_C._jit_pass_onnx_lint(graph)
state_dict = torch.jit._unique_state_dict(model)
params = list(state_dict.values())
graph_inputs = list(graph.inputs())
user_input_num = len(graph_inputs) - len(state_dict)
param_names = list(state_dict.keys())
for i, inp in enumerate(graph_inputs):
if i >= user_input_num:
inp.setDebugName(param_names[i - user_input_num])
_C._jit_pass_onnx_function_substitution(graph)
return graph, params, torch_out, None
def _get_named_param_dict(graph, params):
input_and_param_names = [val.debugName() for val in graph.inputs()]
param_names = input_and_param_names[len(input_and_param_names) - len(params) :]
_params_dict = dict(zip(param_names, params))
return _params_dict
def _get_example_outputs(model, args):
input_args = copy.deepcopy(args)
input_kwargs = {}
if input_args and isinstance(input_args[-1], dict):
input_kwargs = input_args[-1]
input_args = input_args[:-1]
example_outputs = model(*input_args, **input_kwargs)
if isinstance(example_outputs, list):
example_outputs = [example_outputs]
elif not isinstance(example_outputs, tuple):
example_outputs = (example_outputs,)
return example_outputs
_qtype_vtype_map = {
torch.quint8: torch.uint8,
torch.qint8: torch.int8,
torch.qint32: torch.int32,
torch.quint4x2: torch.int8,
}
def unpack_quantized_tensor(value, cast_onnx_accepted=True):
if isinstance(value, torch.Tensor) and value.dtype in _qtype_vtype_map:
q_value_dequantize = value.dequantize()
q_scale = (
torch.tensor(value.q_scale(), dtype=torch.double)
if cast_onnx_accepted
else torch.tensor(value.q_scale(), dtype=torch.float32)
)
q_zero_point = (
torch.tensor(value.q_zero_point(), dtype=torch.int64)
if cast_onnx_accepted
else torch.tensor(value.q_zero_point(), dtype=_qtype_vtype_map[value.dtype])
)
q_value = q_value_dequantize / q_scale + q_zero_point
q_value = q_value.to(dtype=_qtype_vtype_map[value.dtype])
return q_value, q_scale, q_zero_point
else:
return (value,)
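# Illustrative sketch (not part of the original module): shows what
# ``unpack_quantized_tensor`` returns for a per-tensor quantized input. The concrete
# scale/zero_point values are arbitrary example numbers.
def _example_unpack_quantized_tensor():  # pragma: no cover
    q = torch.quantize_per_tensor(
        torch.tensor([0.5, 1.0]), scale=0.1, zero_point=10, dtype=torch.quint8
    )
    # Quantized tensors are flattened into (integer values, scale, zero_point)
    # so the ONNX graph only ever sees plain tensors.
    q_value, q_scale, q_zero_point = unpack_quantized_tensor(q)
    assert q_value.dtype == torch.uint8
    assert q_zero_point.dtype == torch.int64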
def _pre_trace_quant_model(model, args):
r"""Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return
original model.
This is due to https://github.com/pytorch/pytorch/issues/75761.
"""
if any(
hasattr(m, "_packed_params") for m in getattr(model, "modules", lambda: [])()
) or any(getattr(arg, "is_quantized", False) for arg in args):
return torch.jit.trace(model, args)
return model
def _assign_onnx_node_name(graph, node_names):
"""Takes in ONNX graph, and mapping from _C.Node to node name in exported ONNX ModelProto.
Returns:
graph (_C.Graph): A TorchScript IR Graph with ONNX nodes, where each _C.Node gets its name
in exported ONNX ModelProto assigned as attribute ``onnx_name``.
"""
def n_fn(n, b_fn, node_names):
for b in n.blocks():
b_fn(b, node_names)
if n in node_names:
n.s_("onnx_name", node_names[n])
def b_fn(b, node_names):
for n in b.nodes():
n_fn(n, b_fn, node_names)
b_fn(graph, node_names)
return graph
def _model_to_graph(
model,
args,
verbose=False,
input_names=None,
output_names=None,
operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
do_constant_folding=True,
_disable_torch_constant_prop=False,
fixed_batch_size=False,
training=_C_onnx.TrainingMode.EVAL,
dynamic_axes=None,
) -> Tuple[
_C.Graph,
Dict[str, torch.Tensor],
Optional[Union[torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor]]],
]:
"""Converts model into an ONNX graph.
Returns:
graph: A TorchScript IR Graph with ONNX nodes.
params_dict: Dict from input param name to param value.
torch_out: The output tensors resulting from the trace of ``model``.
If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`,
this will be None, since we are not doing any tracing.
"""
# TODO: can we simplify this to always return a tuple of Tensor or None?
# Special case for common case of passing a single Tensor
if isinstance(args, (torch.Tensor, int, float, bool)):
args = (args,)
model = _pre_trace_quant_model(model, args)
graph, params, torch_out, module = _create_jit_graph(model, args)
params_dict = _get_named_param_dict(graph, params)
try:
graph = _optimize_graph(
graph,
operator_export_type,
_disable_torch_constant_prop=_disable_torch_constant_prop,
fixed_batch_size=fixed_batch_size,
params_dict=params_dict,
dynamic_axes=dynamic_axes,
input_names=input_names,
module=module,
)
except Exception as e:
torch.onnx.log("Torch IR graph at exception: ", graph)
raise
is_script = isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule))
if is_script:
example_outputs = _get_example_outputs(model, args)
example_outputs_final = ()
for example_output in example_outputs:
example_outputs_final += unpack_quantized_tensor(example_output)
out_vars, desc = torch.jit._flatten(example_outputs_final)
_C._jit_pass_onnx_assign_output_shape(
graph, out_vars, desc, GLOBALS.onnx_shape_inference, is_script
)
# NB: ONNX requires complete information about output types, which might be
# erased by some optimizations, so we need to set it explicitly again.
else:
if not isinstance(torch_out, (list, tuple)):
output_wrapped = [torch_out]
else:
output_wrapped = torch_out # type: ignore[assignment]
output_tensors, out_desc = _C._jit_flatten(tuple(output_wrapped))
# assign_output_shape pass is not compatible with quantized outputs.
# Quantized outputs are flattened to 3 values in ONNX, while packed as
# single value in PyTorch.
if not any(getattr(out, "is_quantized", False) for out in output_tensors):
_C._jit_pass_onnx_assign_output_shape(
graph,
output_tensors,
out_desc,
GLOBALS.onnx_shape_inference,
is_script,
)
_set_input_and_output_names(graph, input_names, output_names)
params_dict = _get_named_param_dict(graph, params)
if training is None or training == _C_onnx.TrainingMode.EVAL:
params_dict = _C._jit_pass_onnx_eval_peephole(graph, params_dict)
if (
do_constant_folding
and GLOBALS.export_onnx_opset_version in _constants.onnx_constant_folding_opsets
):
params_dict = _C._jit_pass_onnx_constant_fold(
graph, params_dict, GLOBALS.export_onnx_opset_version
)
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
if GLOBALS.onnx_shape_inference:
_C._jit_pass_onnx_graph_shape_type_inference(
graph, params_dict, GLOBALS.export_onnx_opset_version
)
params_dict = _C._jit_pass_onnx_eliminate_unused_items(graph, params_dict)
# For ONNX opset < 9, constants only have three data types: float16, float, double.
# In this pass transform constants of other data types to float/double + cast operator.
if GLOBALS.export_onnx_opset_version < 9:
_C._jit_pass_onnx_cast_all_constant_to_floating(graph)
params_dict = _C._jit_pass_filter_non_tensor_arguments(params_dict)
_C._jit_decay_packed_param_input_types(graph)
# If outputs lack a proper name and are identified only by their unique id,
# give them a legible name for debugging purposes.
_apply_friendly_debug_names(graph, params_dict)
return graph, params_dict, torch_out
def export_to_pretty_string(
model,
args,
export_params=True,
verbose=False,
training=_C_onnx.TrainingMode.EVAL,
input_names=None,
output_names=None,
operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
export_type=None,
google_printer=False,
opset_version=None,
keep_initializers_as_inputs=None,
custom_opsets=None,
add_node_names=True,
do_constant_folding=True,
dynamic_axes=None,
):
r"""
Similar to :func:`export`, but returns a text representation of the ONNX
model. Only differences in args listed below. All other args are the same
as :func:`export`.
Args:
add_node_names (bool, default True): Whether or not to set
NodeProto.name. This makes no difference unless
``google_printer=True``.
google_printer (bool, default False): If False, will return a custom,
compact representation of the model. If True, will return the
protobuf's `Message::DebugString()`, which is more verbose.
Returns:
A UTF-8 str containing a human-readable representation of the ONNX model.
"""
if opset_version is None:
opset_version = _constants.onnx_default_opset
if custom_opsets is None:
custom_opsets = {}
symbolic_helper._set_opset_version(opset_version)
symbolic_helper._set_operator_export_type(operator_export_type)
with exporter_context(model, training, verbose):
val_keep_init_as_ip = _decide_keep_init_as_input(
keep_initializers_as_inputs, operator_export_type, opset_version
)
val_add_node_names = _decide_add_node_names(
add_node_names, operator_export_type
)
val_do_constant_folding = _decide_constant_folding(
do_constant_folding, operator_export_type, training
)
args = _decide_input_format(model, args)
graph, params_dict, torch_out = _model_to_graph(
model,
args,
verbose,
input_names,
output_names,
operator_export_type,
val_do_constant_folding,
training=training,
dynamic_axes=dynamic_axes,
)
return graph._pretty_print_onnx( # type: ignore[attr-defined]
params_dict,
opset_version,
False,
operator_export_type,
google_printer,
val_keep_init_as_ip,
custom_opsets,
val_add_node_names,
)
def unconvertible_ops(
model, args, training=_C_onnx.TrainingMode.EVAL, opset_version=None
):
r"""
Converts the model with operator_export_type set to
torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH once in order to get a list of
all the ops that are not supported/implemented by the exporter.
Args:
model: Same as corresponding arg to torch.onnx.export.
args: Same as corresponding arg to torch.onnx.export.
training: Same as corresponding arg to torch.onnx.export.
opset_version: Same as corresponding arg to torch.onnx.export.
Returns:
Tuple[torch._C.Graph, List[str]], where the list includes the names
of the unconvertible ops.
"""
opset_version = opset_version or _constants.onnx_default_opset
symbolic_helper._set_opset_version(opset_version)
# operator_export_type is set to ONNX_FALLTHROUGH by default so that if an op is not supported
# in ONNX, fall through will occur and export the operator as is, as a custom ONNX op.
with exporter_context(model, training, False):
args = _decide_input_format(model, args)
graph, params_dict, torch_out = _model_to_graph(
model,
args,
# So that if an op cannot be converted to ONNX, it will be kept
# as-is rather than cause a failure.
operator_export_type=_C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH,
)
unsupported_ops = list()
supported_namespaces = ("onnx", "prim", "quantized")
for node in graph.nodes():
if node.kind().split(":")[0] not in supported_namespaces:
unsupported_ops.append(node.kind())
return graph, unsupported_ops
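# Illustrative sketch (not part of the original module): using ``unconvertible_ops`` to
# list the ops the exporter cannot convert for the default opset. The model here is a
# hypothetical placeholder.
def _example_unconvertible_ops():  # pragma: no cover
    model = torch.nn.Sequential(torch.nn.Linear(3, 3), torch.nn.ReLU())
    graph, unsupported = unconvertible_ops(model, (torch.randn(1, 3),))
    # ``unsupported`` holds op kinds (e.g. "aten::...") that would need a fallback
    # or a custom symbolic before a clean ONNX export is possible.
    print(unsupported)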
def _setup_trace_module_map(model, export_modules_as_functions):
def __setup_trace_module_map():
trace_module_map = {_m: torch.typename(type(_m)) for _m in model.modules()}
torch.jit._trace._trace_module_map = trace_module_map
return trace_module_map
def __register_attribute_hook():
attr_name = "_onnx_attrs"
def _track_module_attributes_forward_pre_hook(module, input):
setattr(module, attr_name, _get_module_attributes(module))
def _track_module_attributes_forward_hook(module, input, output):
tracing_state = _C._get_tracing_state()
if not tracing_state:
return
graph = tracing_state.graph()
onnx_attrs = {}
if hasattr(module, attr_name):
onnx_attrs = getattr(module, attr_name)
delattr(module, attr_name)
_C._jit_pass_onnx_track_scope_attributes(graph, onnx_attrs)
for m in model.modules():
m.register_forward_hook(_track_module_attributes_forward_hook)
m.register_forward_pre_hook(_track_module_attributes_forward_pre_hook)
if isinstance(export_modules_as_functions, bool) and export_modules_as_functions:
trace_module_map = __setup_trace_module_map()
export_modules_as_functions = {v for k, v in trace_module_map.items()}
elif (
isinstance(export_modules_as_functions, set)
and len(export_modules_as_functions) > 0
):
def _find_typename(v):
if isinstance(v, type):
return torch.typename(v)
else:
raise RuntimeError(
"Only type of the `nn.Module` should be "
"passed in the set for argument `export_modules_as_functions`. "
"Got `%s`." % (type(v).__name__)
)
trace_module_map = __setup_trace_module_map()
module_typenames = {_find_typename(v) for v in export_modules_as_functions}
export_modules_as_functions = module_typenames
else:
export_modules_as_functions = None
if export_modules_as_functions:
__register_attribute_hook()
return export_modules_as_functions
def _reset_trace_module_map():
torch.jit._trace._trace_module_map = None
_C._jit_pass_onnx_clear_scope_records()
def _get_module_attributes(module):
annotations = typing.get_type_hints(type(module))
base_m_annotations = typing.get_type_hints(torch.nn.Module)
[annotations.pop(k, None) for k in base_m_annotations]
return {k: getattr(module, k) for k in annotations}
def _export(
model,
args,
f,
export_params=True,
verbose=False,
training=_C_onnx.TrainingMode.EVAL,
input_names=None,
output_names=None,
operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
export_type=None,
opset_version=None,
do_constant_folding=True,
dynamic_axes=None,
keep_initializers_as_inputs=None,
fixed_batch_size=False,
custom_opsets=None,
add_node_names=True,
onnx_shape_inference=True,
export_modules_as_functions=False,
):
if export_type is None:
export_type = _exporter_states.ExportTypes.PROTOBUF_FILE
if isinstance(model, torch.nn.DataParallel):
raise ValueError(
"torch.nn.DataParallel is not supported by ONNX "
"exporter, please use 'attribute' module to "
"unwrap model from torch.nn.DataParallel. Try "
"torch.onnx.export(model.module, ...)"
)
assert GLOBALS.in_onnx_export is False
GLOBALS.in_onnx_export = True
try:
symbolic_helper._set_onnx_shape_inference(onnx_shape_inference)
if opset_version is None:
opset_version = _constants.onnx_default_opset
if export_modules_as_functions and opset_version < 15:
raise ValueError(
"`export_modules_as_functions` is not supported for `opset_version` < 15."
"This is because `opset_version` < 15 implies IR version < 8, which means "
"no local function support. "
)
export_modules_as_functions = _setup_trace_module_map(
model, export_modules_as_functions
)
if not operator_export_type:
if _C_onnx._CAFFE2_ATEN_FALLBACK:
operator_export_type = _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
else:
operator_export_type = _C_onnx.OperatorExportTypes.ONNX
# By default, training=TrainingMode.EVAL,
# which is good because running a model in training mode could result in
# internal buffers getting updated, dropout getting applied, etc.
# If you really know what you're doing, you can turn
# training=TrainingMode.TRAINING or training=TrainingMode.PRESERVE,
# (to preserve whatever the original training mode was.)
symbolic_helper._set_opset_version(opset_version)
symbolic_helper._set_operator_export_type(operator_export_type)
with exporter_context(model, training, verbose):
val_keep_init_as_ip = _decide_keep_init_as_input(
keep_initializers_as_inputs, operator_export_type, opset_version
)
val_add_node_names = _decide_add_node_names(
add_node_names, operator_export_type
)
val_do_constant_folding = _decide_constant_folding(
do_constant_folding, operator_export_type, training
)
# Normally f can be a file-like object, but for large models, the external data format requires a
# valid `model_file_location`. Code in export.cpp will enforce this.
if isinstance(f, str):
model_file_location = f
else:
model_file_location = ""
args = _decide_input_format(model, args)
if dynamic_axes is None:
dynamic_axes = {}
_validate_dynamic_axes(dynamic_axes, model, input_names, output_names)
graph, params_dict, torch_out = _model_to_graph(
model,
args,
verbose,
input_names,
output_names,
operator_export_type,
val_do_constant_folding,
fixed_batch_size=fixed_batch_size,
training=training,
dynamic_axes=dynamic_axes,
)
# TODO: Don't allocate a in-memory string for the protobuf
defer_weight_export = (
export_type is not _exporter_states.ExportTypes.PROTOBUF_FILE
)
if custom_opsets is None:
custom_opsets = {}
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
node_attr_to_name = {} # type: ignore[var-annotated]
if export_modules_as_functions:
# NOTE: cannot call DCE after this pass. DCE will remove function definition nodes.
node_attr_to_name = _C._jit_pass_onnx_function_extraction(
graph, export_modules_as_functions, list(params_dict.keys())
)
params_dict = _C._jit_pass_onnx_deduplicate_initializers( # type: ignore[assignment]
graph, params_dict, getattr(model, "training", False) # type: ignore[arg-type]
)
if export_params:
(
proto,
export_map,
val_use_external_data_format,
node_names,
) = graph._export_onnx( # type: ignore[attr-defined]
params_dict,
opset_version,
dynamic_axes,
defer_weight_export,
operator_export_type,
not verbose,
val_keep_init_as_ip,
custom_opsets,
val_add_node_names,
model_file_location,
node_attr_to_name,
)
else:
(
proto,
export_map,
val_use_external_data_format,
node_names,
) = graph._export_onnx( # type: ignore[attr-defined]
{},
opset_version,
dynamic_axes,
False,
operator_export_type,
not verbose,
val_keep_init_as_ip,
custom_opsets,
val_add_node_names,
model_file_location,
node_attr_to_name,
)
if verbose:
torch.onnx.log(
"Exported graph: ", _assign_onnx_node_name(graph, node_names)
)
if export_type == _exporter_states.ExportTypes.PROTOBUF_FILE:
assert len(export_map) == 0
with torch.serialization._open_file_like(f, "wb") as opened_file:
opened_file.write(proto)
elif export_type in [
_exporter_states.ExportTypes.ZIP_ARCHIVE,
_exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVE,
]:
compression = (
zipfile.ZIP_DEFLATED
if export_type
== _exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVE
else zipfile.ZIP_STORED
)
with zipfile.ZipFile(f, "w", compression=compression) as z:
z.writestr(_constants.ONNX_ARCHIVE_MODEL_PROTO_NAME, proto)
for k, v in export_map.items():
z.writestr(k, v)
elif export_type == _exporter_states.ExportTypes.DIRECTORY:
if os.path.exists(f):
assert os.path.isdir(f)
else:
os.makedirs(f)
model_proto_file = os.path.join(
f, _constants.ONNX_ARCHIVE_MODEL_PROTO_NAME
)
with torch.serialization._open_file_like(
model_proto_file, "wb"
) as opened_file:
opened_file.write(proto)
for k, v in export_map.items():
weight_proto_file = os.path.join(f, k)
with torch.serialization._open_file_like(
weight_proto_file, "wb"
) as opened_file:
opened_file.write(v)
else:
raise RuntimeError("Unknown export type")
# The ONNX checker only works for ONNX graph. So if the operator_export_type is not ONNX,
# we can skip this check.
# If large model format export is enabled, proto will only contain data location instead of
# raw data and _check_onnx_proto() will fail because it can only handle the raw ONNX proto
# string in memory.
if (operator_export_type is _C_onnx.OperatorExportTypes.ONNX) and (
not val_use_external_data_format
):
try:
_C._check_onnx_proto(proto, full_check=True)
except RuntimeError as e:
raise errors.CheckerError(e) from e
finally:
assert GLOBALS.in_onnx_export
GLOBALS.in_onnx_export = False
_reset_trace_module_map()
return torch_out
def _apply_friendly_debug_names(graph, params):
for n in graph.nodes():
for v in n.inputs():
old_name = v.debugName()
if old_name != str(v.unique()):
continue
new_name = f"{n.kind()}_{v.unique()}"
v.setDebugName(new_name)
if old_name in params:
params[new_name] = params.pop(old_name)
def _set_input_and_output_names(graph, input_names, output_names):
def set_names(node_list, name_list, descriptor):
if name_list is None:
return
if len(name_list) > len(node_list):
raise RuntimeError(
"number of %s names provided (%d) exceeded number of %ss (%d)"
% (descriptor, len(name_list), descriptor, len(node_list))
)
# Mark if the output node DebugName is set before.
output_node_set = set()
for i, (name, node) in enumerate(zip(name_list, node_list)):
# Duplicated output node, insert onnx::Identity to avoid setting the same DebugName after setDebugName().
if descriptor == "output":
if node in output_node_set:
identity_node = graph.create("onnx::Identity")
identity_node.insertAfter(node.node())
identity_node.addInput(node)
identity_node.output().setType(node.type())
graph.return_node().replaceInput(i, identity_node.output())
node = identity_node.output()
output_node_set.add(node)
if node.debugName() != name:
node.setDebugName(name)
set_names(list(graph.inputs()), input_names, "input")
set_names(list(graph.outputs()), output_names, "output")
def _run_symbolic_method(g, op_name, symbolic_fn, args):
r"""
This trampoline function gets invoked for every symbolic method
call from C++.
"""
try:
return symbolic_fn(g, *args)
except TypeError as e:
# Handle the specific case where we didn't successfully dispatch
# to symbolic_fn. Otherwise, the backtrace will have the clues
# you need.
e.args = (f"{e.args[0]} (occurred when translating {op_name})",)
raise
def _add_block(node: _C.Node):
return node.addBlock() # type: ignore[attr-defined]
def _add_input_to_block(block: _C.Block):
return block.addInputToBlock() # type: ignore[attr-defined]
def _add_output_to_block(block: _C.Block, value: _C.Value):
new_output = block.registerOutput(value) # type: ignore[attr-defined]
return new_output
# Note [Export inplace]
# ~~~~~~~~~~~~~~~~~~~~~
# In abstract, it would be better for us to export inplace annotations,
# than to not export them, since it is useful information that can
# help the target of an ONNX export export more efficiently. However,
# ONNX doesn't currently formalize inplace. Fortunately, it's sound to drop
# inplace annotations, but we are losing information this way.
def _find_symbolic_in_registry(
domain: str,
op_name: str,
opset_version: int,
operator_export_type: _C_onnx.OperatorExportTypes,
) -> Optional[Callable]:
"""Looks up for the symbolic function in the registry.
Args:
domain: The domain of the symbolic function.
op_name: The name of the op.
opset_version: Current opset version used.
operator_export_type: An enum in _C_onnx.OperatorExportTypes.
Returns:
The symbolic function if found, None otherwise.
"""
if not symbolic_registry.is_registered_op(op_name, domain, opset_version):
if operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH:
# Use the original node directly
return None
return symbolic_registry.get_registered_op(op_name, domain, opset_version)
def _should_aten_fallback(ns, op_name, opset_version, operator_export_type):
is_exportable_aten_op = symbolic_registry.is_registered_op(
op_name, "", opset_version
)
is_onnx_aten_export = operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN
is_aten_fallback_export = (
operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
)
return is_onnx_aten_export or (
not is_exportable_aten_op and is_aten_fallback_export
)
def _need_symbolic_context(symbolic_fn) -> bool:
"""Checks if the first argument to symbolic_fn is annotated as type `torch.onnx.SymbolicContext`."""
params = tuple(inspect.signature(symbolic_fn).parameters.values())
# When evaluation of annotations is postponed (PEP 563), the annotation is a string
# and not a type. We need to use get_type_hints to get the real type.
if not params:
return False
first_param_name = params[0].name
type_hints = typing.get_type_hints(symbolic_fn)
if first_param_name not in type_hints:
return False
param_type = type_hints[first_param_name]
return issubclass(param_type, _exporter_states.SymbolicContext)
def _get_aten_op_overload_name(n: _C.Node) -> str:
# Returns the `overload_name` attribute of ATen ops on non-Caffe2 builds
schema = n.schema()
if not schema.startswith("aten::") or symbolic_helper.is_caffe2_aten_fallback():
return ""
return _C.parse_schema(schema).overload_name
def _run_symbolic_function(
g: _C.Graph,
block: _C.Block,
n: _C.Node,
inputs: Any,
env: Dict[_C.Value, _C.Value],
operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
) -> Optional[Union[_C.Value, Tuple[_C.Value, ...]]]:
"""Runs a symbolic function.
The function is used in C++ to export the node to ONNX.
Returns:
A single or a tuple of Values.
None when the node gets cloned as is into the new graph.
"""
opset_version = GLOBALS.export_onnx_opset_version
# See Note [Export inplace]
node_kind = n.kind()
if node_kind.endswith("_"):
# Treat relu_ -> relu; add_ -> add etc.
ns_op_name = node_kind[:-1]
else:
ns_op_name = node_kind
namespace, op_name = ns_op_name.split("::")
try:
symbolic_registry.register_version("", opset_version)
# Caffe2-specific: Quantized op symbolics are registered for opset 9 only.
if symbolic_helper.is_caffe2_aten_fallback() and opset_version == 9:
symbolic_caffe2.register_quantized_ops("caffe2", opset_version)
if namespace == "aten":
domain = ""
elif namespace == "quantized" and symbolic_helper.is_caffe2_aten_fallback():
domain = "caffe2"
else:
domain = namespace
if symbolic_registry.is_registered_op(op_name, domain, opset_version):
symbolic_fn = _find_symbolic_in_registry(
domain, op_name, opset_version, operator_export_type
)
assert symbolic_fn is not None
attrs = {k: symbolic_helper._node_get(n, k) for k in n.attributeNames()}
if _need_symbolic_context(symbolic_fn):
ctx = _exporter_states.SymbolicContext(_params_dict, env, n, block)
return symbolic_fn(ctx, g, *inputs, **attrs)
# PythonOp symbolic need access to the node to resolve the name conflict,
# this is inconsistent with regular op symbolic.
if op_name == "PythonOp":
inputs = (n, *inputs)
return symbolic_fn(g, *inputs, **attrs)
elif namespace == "onnx":
# Clone node to trigger ONNX shape inference
attrs = {
k + "_" + n.kindOf(k)[0]: symbolic_helper._node_get(n, k)
for k in n.attributeNames()
}
return g.op(op_name, *inputs, **attrs, outputs=n.outputsSize()) # type: ignore[attr-defined]
elif _should_aten_fallback(
namespace, op_name, opset_version, operator_export_type
):
# Direct ATen export requested
attrs = {
k + "_" + n.kindOf(k)[0]: symbolic_helper._node_get(n, k)
for k in n.attributeNames()
}
outputs = n.outputsSize()
attrs["outputs"] = outputs
# `overload_name` is set for non-Caffe2 builds only
return g.at( # type: ignore[attr-defined]
op_name, *inputs, overload_name=_get_aten_op_overload_name(n), **attrs
)
else:
raise errors.UnsupportedOperatorError(
domain,
op_name,
opset_version,
symbolic_registry.get_op_supported_version(
op_name, domain, opset_version
),
)
except RuntimeError:
if operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH:
return None
elif (
operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
and not symbolic_helper.is_caffe2_aten_fallback()
):
# Emit ATen op for non-Caffe2 builds when `operator_export_type==ONNX_ATEN_FALLBACK`
attrs = {
k + "_" + n.kindOf(k)[0]: symbolic_helper._node_get(n, k)
for k in n.attributeNames()
}
return g.at( # type: ignore[attr-defined]
op_name, *inputs, overload_name=_get_aten_op_overload_name(n), **attrs
)
raise
except TypeError as e:
# Handle the specific case where we didn't successfully dispatch.
# Otherwise, the backtrace will have the clues you need.
e.args = (f"{e.args[0]} \n(Occurred when translating {op_name}).",)
raise
def get_ns_op_name_from_custom_op(symbolic_name):
if not bool(
re.match(r"^[a-zA-Z0-9-_]*::[a-zA-Z-_]+[a-zA-Z0-9-_]*$", symbolic_name)
):
raise ValueError(
f"Failed to register operator {symbolic_name}."
"The symbolic name must match the format Domain::Name, "
"and should start with a letter and contain only "
"alphanumerical characters"
)
ns, op_name = symbolic_name.split("::")
if ns == "onnx":
raise ValueError(
f"Failed to register operator {symbolic_name}. {ns} domain cannot be modified."
)
if ns == "aten":
ns = ""
return ns, op_name
def register_custom_op_symbolic(symbolic_name, symbolic_fn, opset_version):
"""Registers a symbolic function for a custom operator.
When the user registers symbolic for custom/contrib ops,
it is highly recommended to add shape inference for that operator via setType API,
otherwise the exported graph may have incorrect shape inference in some extreme cases.
An example of setType is `test_aten_embedding_2` in `test_operators.py`.
See "Custom Operators" in the module documentation for an example usage.
Args:
symbolic_name (str): The name of the custom operator in "<domain>::<op>"
format.
symbolic_fn (Callable): A function that takes in the ONNX graph and
the input arguments to the current operator, and returns new
operator nodes to add to the graph.
opset_version (int): The ONNX opset version in which to register.
"""
ns, op_name = get_ns_op_name_from_custom_op(symbolic_name)
for version in itertools.chain(
_constants.onnx_stable_opsets, [_constants.onnx_main_opset]
):
if version >= opset_version:
symbolic_registry.register_op(op_name, symbolic_fn, ns, version)
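# Illustrative sketch (not part of the original module): registering a symbolic for a
# hypothetical custom operator "custom_ops::my_relu". The symbolic simply lowers the op
# onto a standard ONNX Relu node.
def _example_register_custom_op_symbolic():  # pragma: no cover
    def my_relu_symbolic(g, input):
        # ``g.op`` emits an ONNX node into the graph being built; a single Relu suffices.
        return g.op("Relu", input)

    register_custom_op_symbolic("custom_ops::my_relu", my_relu_symbolic, 9)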
def unregister_custom_op_symbolic(symbolic_name: str, opset_version: int):
"""Unregisters ``symbolic_name``.
See "Custom Operators" in the module documentation for an example usage.
Args:
symbolic_name (str): The name of the custom operator in "<domain>::<op>"
format.
opset_version (int): The ONNX opset version in which to unregister.
"""
ns, op_name = get_ns_op_name_from_custom_op(symbolic_name)
for version in itertools.chain(
_constants.onnx_stable_opsets, [_constants.onnx_main_opset]
):
if version >= opset_version:
symbolic_registry.unregister_op(op_name, ns, version)
def _validate_dynamic_axes(dynamic_axes, model, input_names, output_names):
"""Ensures dynamic axes argument is follows the expected format."""
if len(dynamic_axes) == 0:
return
if hasattr(model, "graph"):
# Extracting set of valid input/output names that shall be used for dynamic_axes
if (input_names is None) or len(input_names) == 0:
input_names = [x.debugName() for x in model.graph.inputs()]
if (output_names is None) or len(output_names) == 0:
output_names = [y.debugName() for y in model.graph.outputs()]
valid_names = set((input_names or []) + (output_names or []))
# If dynamic axes are provided as a list rather than dictionary, they should
# first get converted to a dictionary in expected format. If desired axes names
# are not provided for dynamic axes, automatic names shall be generated for
# provided dynamic axes of specified input/output
for key, value in dynamic_axes.items():
if key not in valid_names:
warnings.warn(
f"Provided key {key} for dynamic axes is not a valid input/output name"
)
if isinstance(value, list):
warnings.warn(
"No names were found for specified dynamic axes of provided input."
f"Automatically generated names will be applied to each dynamic axes of input {key}"
)
value_dict = {}
for i, x in enumerate(value):
if not isinstance(x, int):
raise ValueError(
"The type of axis index is expected to be an integer"
)
if x in value_dict:
warnings.warn(
f"Duplicate dynamic axis index {x} was provided for input {key}."
)
else:
value_dict[x] = str(key) + "_dynamic_axes_" + str(i + 1)
dynamic_axes[key] = value_dict
| pytorch-master | torch/onnx/utils.py |
"""Experimental classes and functions used by ONNX export."""
import dataclasses
from typing import Mapping, Optional, Sequence, Set, Type, Union
import torch
import torch._C._onnx as _C_onnx
@dataclasses.dataclass
class ExportOptions:
"""Arguments used by :func:`torch.onnx.export`.
TODO: Adopt this in `torch.onnx.export` api to replace keyword arguments.
"""
export_params: bool = True
verbose: bool = False
training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL
input_names: Optional[Sequence[str]] = None
output_names: Optional[Sequence[str]] = None
operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX
opset_version: Optional[int] = None
do_constant_folding: bool = True
dynamic_axes: Optional[Mapping[str, Union[Mapping[int, str], Sequence[int]]]] = None
keep_initializers_as_inputs: Optional[bool] = None
custom_opsets: Optional[Mapping[str, int]] = None
export_modules_as_functions: Union[bool, Set[Type[torch.nn.Module]]] = False
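# Illustrative sketch (not part of the original module): constructing ``ExportOptions``
# and expanding it into keyword arguments for ``torch.onnx.export``. How the class will
# eventually be consumed is still a TODO above, so this mirrors only one plausible usage;
# the Linear model and "model.onnx" file name are hypothetical placeholders.
def _example_export_options():  # pragma: no cover
    options = ExportOptions(opset_version=13, do_constant_folding=False)
    model = torch.nn.Linear(2, 2)
    torch.onnx.export(
        model, (torch.randn(1, 2),), "model.onnx", **dataclasses.asdict(options)
    )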
| pytorch-master | torch/onnx/_experimental.py |
import importlib
import inspect
import itertools
import warnings
from typing import Any, Callable, Dict, Tuple, Union
from torch import _C
from torch.onnx import _constants, errors
__all__ = [
"get_op_supported_version",
"get_ops_in_version",
"get_registered_op",
"is_registered_op",
"is_registered_version",
"register_op",
"register_ops_helper",
"register_ops_in_version",
"register_version",
"unregister_op",
]
_SymbolicFunction = Callable[..., Union[_C.Value, Tuple[_C.Value]]]
"""
The symbolic registry "_registry" is a dictionary that maps operators
(for a specific domain and opset version) to their symbolic functions.
An operator is defined by its domain, opset version, and opname.
The outer keys are tuples (domain, version), where domain is a string and version is an int;
the inner keys are operator names (strings).
The map's entries are as follows: _registry[(domain, version)][op_name] = op_symbolic
"""
_registry: Dict[
Tuple[str, int],
Dict[str, _SymbolicFunction],
] = {}
_symbolic_versions: Dict[Union[int, str], Any] = {}
def _import_symbolic_opsets():
for opset_version in itertools.chain(
_constants.onnx_stable_opsets, [_constants.onnx_main_opset]
):
module = importlib.import_module(f"torch.onnx.symbolic_opset{opset_version}")
global _symbolic_versions
_symbolic_versions[opset_version] = module
def register_version(domain: str, version: int):
if not is_registered_version(domain, version):
global _registry
_registry[(domain, version)] = {}
register_ops_in_version(domain, version)
def register_ops_helper(domain: str, version: int, iter_version: int):
for domain, op_name, op_func in get_ops_in_version(iter_version):
if not is_registered_op(op_name, domain, version):
register_op(op_name, op_func, domain, version)
def register_ops_in_version(domain: str, version: int):
"""Iterates through the symbolic functions of the specified opset version, and the
previous opset versions for operators supported in previous versions.
Opset 9 is the base version. It is selected as the base version because
1. It is the first opset version supported by PyTorch export.
2. opset 9 is more robust than previous opset versions. Opset versions like 7/8 have limitations
that certain basic operators cannot be expressed in ONNX. Instead of basing on these limitations,
we chose to handle them as special cases separately.
Backward support for opset versions beyond opset 7 is not in our roadmap.
For opset versions other than 9, by default they will inherit the symbolic functions defined in
symbolic_opset9.py.
To extend support for updated operators in different opset versions on top of opset 9,
simply add the updated symbolic functions in the respective symbolic_opset{version}.py file.
Checkout topk in symbolic_opset10.py, and upsample_nearest2d in symbolic_opset8.py for example.
"""
iter_version = version
while iter_version != 9:
register_ops_helper(domain, version, iter_version)
if iter_version > 9:
iter_version = iter_version - 1
else:
iter_version = iter_version + 1
register_ops_helper(domain, version, 9)
def get_ops_in_version(version: int):
if not _symbolic_versions:
_import_symbolic_opsets()
members = inspect.getmembers(_symbolic_versions[version])
domain_opname_ops = []
for obj in members:
if isinstance(obj[1], type) and hasattr(obj[1], "domain"):
ops = inspect.getmembers(obj[1], predicate=inspect.isfunction)
for op in ops:
domain_opname_ops.append((obj[1].domain, op[0], op[1])) # type: ignore[attr-defined]
elif inspect.isfunction(obj[1]):
if obj[0] == "_len":
obj = ("len", obj[1])
if obj[0] == "_list":
obj = ("list", obj[1])
if obj[0] == "_any":
obj = ("any", obj[1])
if obj[0] == "_all":
obj = ("all", obj[1])
domain_opname_ops.append(("", obj[0], obj[1]))
return domain_opname_ops
def is_registered_version(domain: str, version: int):
global _registry
return (domain, version) in _registry
def register_op(opname, op, domain, version):
if domain is None or version is None:
warnings.warn(
"ONNX export failed. The ONNX domain and/or version to register are None."
)
global _registry
if not is_registered_version(domain, version):
_registry[(domain, version)] = {}
_registry[(domain, version)][opname] = op
def is_registered_op(opname: str, domain: str, version: int):
if domain is None or version is None:
warnings.warn("ONNX export failed. The ONNX domain and/or version are None.")
global _registry
return (domain, version) in _registry and opname in _registry[(domain, version)]
def unregister_op(opname: str, domain: str, version: int):
global _registry
if is_registered_op(opname, domain, version):
del _registry[(domain, version)][opname]
if not _registry[(domain, version)]:
del _registry[(domain, version)]
else:
warnings.warn("The opname " + opname + " is not registered.")
def get_op_supported_version(opname: str, domain: str, version: int):
iter_version = version
while iter_version <= _constants.onnx_main_opset:
ops = [(op[0], op[1]) for op in get_ops_in_version(iter_version)]
if (domain, opname) in ops:
return iter_version
iter_version += 1
return None
def get_registered_op(opname: str, domain: str, version: int) -> _SymbolicFunction:
if domain is None or version is None:
warnings.warn("ONNX export failed. The ONNX domain and/or version are None.")
global _registry
if not is_registered_op(opname, domain, version):
raise errors.UnsupportedOperatorError(
domain, opname, version, get_op_supported_version(opname, domain, version)
)
return _registry[(domain, version)][opname]
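# A minimal usage sketch of the registry API defined above (the operator name and
# symbolic function are illustrative stand-ins, not real exporter rules):
#
#     def _my_relu_symbolic(g, input):
#         return g.op("Relu", input)
#
#     register_version("", 13)                          # populate the default domain for opset 13
#     register_op("my_relu", _my_relu_symbolic, "", 13)
#     assert is_registered_op("my_relu", "", 13)
#     symbolic_fn = get_registered_op("my_relu", "", 13)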
| pytorch-master | torch/onnx/symbolic_registry.py |
"""ONNX exporter exceptions."""
from __future__ import annotations
import textwrap
from typing import Optional
from torch import _C
from torch.onnx import _constants
__all__ = [
"OnnxExporterError",
"CheckerError",
"UnsupportedOperatorError",
"SymbolicValueError",
]
class OnnxExporterError(RuntimeError):
"""Errors raised by the ONNX exporter."""
pass
class CheckerError(OnnxExporterError):
"""Raised when ONNX checker detects an invalid model."""
pass
class UnsupportedOperatorError(OnnxExporterError):
"""Raised when an operator is unsupported by the exporter."""
def __init__(
self, domain: str, op_name: str, version: int, supported_version: Optional[int]
):
if domain in {"", "aten", "prim", "quantized"}:
msg = f"Exporting the operator '{domain}::{op_name}' to ONNX opset version {version} is not supported. "
if supported_version is not None:
msg += (
f"Support for this operator was added in version {supported_version}, "
"try exporting with this version."
)
else:
msg += "Please feel free to request support or submit a pull request on PyTorch GitHub: "
msg += _constants.PYTORCH_GITHUB_ISSUES_URL
else:
msg = (
f"ONNX export failed on an operator with unrecognized namespace '{domain}::{op_name}'. "
"If you are trying to export a custom operator, make sure you registered "
"it with the right domain and version."
)
super().__init__(msg)
class SymbolicValueError(OnnxExporterError):
"""Errors around TorchScript values and nodes."""
def __init__(self, msg: str, value: _C.Value):
message = (
f"{msg} [Caused by the value '{value}' (type '{value.type()}') in the "
f"TorchScript graph. The containing node has kind '{value.node().kind()}'.] "
)
code_location = value.node().sourceRange()
if code_location:
message += f"\n (node defined in {code_location})"
try:
# Add its input and output to the message.
message += "\n\n"
message += textwrap.indent(
(
"Inputs:\n"
+ (
"\n".join(
f" #{i}: {input_} (type '{input_.type()}')"
for i, input_ in enumerate(value.node().inputs())
)
or " Empty"
)
+ "\n"
+ "Outputs:\n"
+ (
"\n".join(
f" #{i}: {output} (type '{output.type()}')"
for i, output in enumerate(value.node().outputs())
)
or " Empty"
)
),
" ",
)
except AttributeError:
message += (
" Failed to obtain its input and output for debugging. "
"Please refer to the TorchScript graph for debugging information."
)
super().__init__(message)
| pytorch-master | torch/onnx/errors.py |
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
# This file exports ONNX ops for opset 13
import torch
import torch._C._onnx as _C_onnx
from torch.onnx import (
_type_utils,
symbolic_helper,
symbolic_opset11 as opset11,
symbolic_opset9 as opset9,
utils,
)
@symbolic_helper.parse_args("v", "i", "none")
def softmax(g, input, dim, dtype=None):
softmax = g.op("Softmax", input, axis_i=dim)
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype")
softmax = g.op(
"Cast", softmax, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type()
)
return softmax
@symbolic_helper.parse_args("v", "i", "none")
def log_softmax(g, input, dim, dtype=None):
return_op = g.op("LogSoftmax", input, axis_i=dim)
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = symbolic_helper._get_const(dtype, "i", "dtype")
return_op = g.op(
"Cast", return_op, to_i=_type_utils.JitScalarType(parsed_dtype).onnx_type()
)
return return_op
@symbolic_helper.parse_args("v", "v", "i")
def frobenius_norm(g, self, dim=None, keepdim=False):
dim_val = symbolic_helper._maybe_get_const(dim, "is")
if not symbolic_helper._is_value(dim_val) and len(dim_val) == 0:
return g.op("ReduceL2", self, keepdims_i=0)
sqr = g.op("Mul", self, self)
sumsqr = symbolic_helper._reducesum_helper(g, sqr, dim, keepdims_i=keepdim)
return g.op("Sqrt", sumsqr)
@symbolic_helper.parse_args("v", "v", "i", "i")
def split(g, self, split_size_or_sizes, dim, _outputs=None):
if not symbolic_helper._is_split_static(split_size_or_sizes, _outputs):
split_out = g.op("SplitToSequence", self, split_size_or_sizes, axis_i=dim)
if _outputs is None:
return split_out
# Convert to multiple slice nodes iff number of splits and number of outputs are statically known.
if (
symbolic_helper._is_packed_list(split_size_or_sizes)
and len(symbolic_helper._unpack_list(split_size_or_sizes)) == _outputs
):
split_sizes = [
symbolic_helper._unsqueeze_helper(g, v, [0])
for v in symbolic_helper._unpack_list(split_size_or_sizes)
]
start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
res = []
for i in range(_outputs):
end = g.op(
"Add", start, split_sizes[i]
) # split_sizes is a list of same length as _outputs
res.append(g.op("Slice", self, start, end, axis))
start = end
return res
return [
g.op(
"SequenceAt",
split_out,
g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)),
)
for i in range(_outputs)
]
split_val = symbolic_helper._node_get(split_size_or_sizes.node(), "value")
if split_val.dim() > 0:
return g.op("Split", self, split_size_or_sizes, axis_i=dim, outputs=_outputs)
split_size = symbolic_helper._get_const(split_size_or_sizes, "i", "split_size")
size = symbolic_helper._get_tensor_dim_size(self, dim)
if size is None:
if _outputs is not None:
size = split_size * _outputs
else:
raise RuntimeError("Unknown dimension size not supported")
splits = [split_size] * (size // split_size)
leftover = size % split_size
if leftover:
splits.append(leftover)
splits = g.op("Constant", value_t=torch.tensor(splits))
return g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
def split_with_sizes(g, self, split_sizes, dim, _outputs=None):
return split(g, self, split_sizes, dim, _outputs)
def unsafe_split(g, self, split_size_or_sizes, dim, _outputs=None):
return split(g, self, split_size_or_sizes, dim, _outputs)
def unsafe_split_with_sizes(g, self, split_sizes, dim, _outputs=None):
return split_with_sizes(g, self, split_sizes, dim, _outputs)
@symbolic_helper.parse_args("v", "v", "i", "i")
def tensor_split(g, self, indices_or_sections, dim, _outputs=None):
axis = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.long))
axis = opset11.unsqueeze(g, axis, 0)
const_1 = g.op("Constant", value_t=torch.tensor(1, dtype=torch.long))
if symbolic_helper._is_split_static(indices_or_sections, _outputs):
split_val = symbolic_helper._node_get(indices_or_sections.node(), "value")
if split_val.dim() > 0:
start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
res = []
assert _outputs is not None
for i in range(_outputs - 1):
end = g.op(
"Gather",
indices_or_sections,
g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)),
axis_i=0,
)
res.append(g.op("Slice", self, start, end, axis))
start = end
end = symbolic_helper._size_helper(g, self, axis)
res.append(g.op("Slice", self, start, end, axis))
return res
split_size = symbolic_helper._get_const(
indices_or_sections, "i", "indices_or_sections"
)
size = symbolic_helper._get_tensor_dim_size(self, dim)
if size is None:
if _outputs is not None:
size = split_size * _outputs
else:
raise RuntimeError("Unknown dimension size not supported")
min_split_size = size // split_size
num_splits_one_extra = size % split_size
splits = num_splits_one_extra * [min_split_size + 1]
leftover = (split_size - num_splits_one_extra) * [min_split_size]
splits = g.op(
"Constant", value_t=torch.tensor(splits + leftover, dtype=torch.long)
)
return g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
if (
symbolic_helper._is_tensor(indices_or_sections)
and symbolic_helper._get_tensor_rank(indices_or_sections) == 1
):
loop_len = symbolic_helper._size_helper(
g, indices_or_sections, g.op("Constant", value_t=torch.tensor(0))
)
loop_len = opset11.unsqueeze(g, loop_len, 0)
loop_condition = g.op("Cast", const_1, to_i=_C_onnx.TensorProtoDataType.BOOL)
# To make the first slice in the below loop work,
# we pad a zero to the first position so that it will be the initial start of slice.
padding_0 = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
indices_or_sections = g.op("Concat", padding_0, indices_or_sections, axis_i=0)
final_splits = g.op("SequenceEmpty")
loop = g.op("Loop", loop_len, loop_condition, final_splits)
# Loop inputs
loop_block = utils._add_block(loop.node())
block_input_iter = utils._add_input_to_block(loop_block)
cond = utils._add_input_to_block(loop_block)
final_splits = utils._add_input_to_block(loop_block)
start = loop_block.op("Gather", indices_or_sections, block_input_iter, axis_i=0)
end = loop_block.op(
"Gather",
indices_or_sections,
loop_block.op("Add", block_input_iter, const_1),
axis_i=0,
)
slice = loop_block.op("Slice", self, start, end, axis)
final_splits = loop_block.op("SequenceInsert", final_splits, slice)
# Loop outputs
cond_out = loop_block.op("Identity", loop_condition)
utils._add_output_to_block(loop_block, cond_out)
utils._add_output_to_block(loop_block, final_splits)
loop_out = loop.node().output()
start = g.op(
"Gather",
indices_or_sections,
g.op("Constant", value_t=torch.tensor(-1, dtype=torch.long)),
axis_i=0,
)
start = opset11.unsqueeze(g, start, 0)
end = symbolic_helper._size_helper(g, self, axis)
last_slice = g.op("Slice", self, start, end, axis)
return g.op("SequenceInsert", loop_out, last_slice)
else: # scalar tensor
dim_size = symbolic_helper._size_helper(g, self, axis)
min_split_size = g.op("Div", dim_size, indices_or_sections)
min_split_size_plus_1 = g.op(
"Add",
min_split_size,
const_1,
)
num_splits_one_extra = g.op("Mod", dim_size, indices_or_sections)
splits = g.op("Tile", min_split_size_plus_1, num_splits_one_extra)
leftover = g.op(
"Tile",
min_split_size,
g.op(
"Sub",
opset11.unsqueeze(g, indices_or_sections, 0),
num_splits_one_extra,
),
)
splits = g.op("Concat", splits, leftover, axis_i=0)
if _outputs is None:
return g.op("SplitToSequence", self, splits, axis_i=dim)
return g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
@symbolic_helper.parse_args("v", "i", "i")
def unbind(g, self, dim=0, _outputs=None):
if _outputs is None:
return g.op(
"SplitToSequence",
self,
g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)),
axis_i=dim,
keepdims_i=0,
)
splits = g.op("Constant", value_t=torch.tensor([1] * _outputs))
outputs = g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
outputs = [outputs] if _outputs == 1 else outputs
squeezed_outputs = [
g.op("Squeeze", out, g.op("Constant", value_t=torch.tensor([dim])))
for out in outputs
]
return squeezed_outputs
# Emitted from `torch.nonzero(x, as_tuple=True)`
def nonzero_numpy(g, input, _outputs=None):
return unbind(g, opset9.nonzero(g, input), 1, _outputs=_outputs)
@symbolic_helper.parse_args("v", "v", "v", "i")
def where(g, condition, self=None, other=None, _outputs=None):
# Assumes that torch.where's first argument takes only Bool and Byte tensors.
if condition.type().scalarType() != "Bool":
condition = g.op("Cast", condition, to_i=_C_onnx.TensorProtoDataType.BOOL)
if self is None:
condition = opset9.nonzero(g, condition)
return symbolic_helper._unbind_helper(
g, condition, g.op("Constant", value_t=torch.tensor(1)), _outputs
)
return g.op("Where", condition, self, other)
@symbolic_helper.parse_args("v", "v", "v", "i", "i", "i")
def fake_quantize_per_channel_affine(
g, inputs, scale, zero_point, axis, quant_min=-128, quant_max=127
):
    # NOTE: (0, 127) is allowed as a special case. PyTorch restricts activations to be in the range (0, 127).
# https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422
if (quant_min, quant_max) not in [(0, 255), (-128, 127), (0, 127)]:
raise RuntimeError(
"For (quant_min, quant_max), ONNX allows only (0, 127), (0, 255) and (-128, 127). "
"Got ({}, {})".format(quant_min, quant_max)
)
# ONNX defines zero_point to be int8 or uint8
if quant_min == 0:
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8)
else:
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8)
quantized = g.op("QuantizeLinear", inputs, scale, zero_point, axis_i=axis)
if (quant_min, quant_max) == (0, 127):
quantized = g.op(
"Clip",
quantized,
opset9.unused(g),
g.op("Constant", value_t=torch.tensor(127, dtype=torch.uint8)),
)
return g.op("DequantizeLinear", quantized, scale, zero_point, axis_i=axis)
@symbolic_helper.parse_args("v", "v", "v", "i", "i")
def fake_quantize_per_tensor_affine(
g, inputs, scale, zero_point, quant_min=-128, quant_max=127
):
    # NOTE: (0, 127) is allowed as a special case. PyTorch restricts activations to be in the range (0, 127).
# https://github.com/pytorch/pytorch/blob/b34b192d6b97325c9f78e5995c48c8498ede34bd/torch/ao/quantization/observer.py#L1422
if (quant_min, quant_max) not in [(0, 255), (-128, 127), (0, 127)]:
raise RuntimeError(
"For (quant_min, quant_max), ONNX allows only (0, 127), (0, 255) and (-128, 127). "
"Got ({}, {})".format(quant_min, quant_max)
)
if quant_min == 0:
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.UINT8)
else:
zero_point = g.op("Cast", zero_point, to_i=_C_onnx.TensorProtoDataType.INT8)
if scale.type().scalarType() != "Float":
scale = g.op("Cast", scale, to_i=_C_onnx.TensorProtoDataType.FLOAT)
quantized = g.op("QuantizeLinear", inputs, scale, zero_point)
if (quant_min, quant_max) == (0, 127):
quantized = g.op(
"Clip",
quantized,
opset9.unused(g),
g.op("Constant", value_t=torch.tensor(127, dtype=torch.uint8)),
)
return g.op("DequantizeLinear", quantized, scale, zero_point)
def _reduce_op_symbolic(onnx_op_name):
def symbolic(g, self, dim=None, keepdim=None):
self = opset9._maybe_cast_reduce_op_input(g, self)
if dim is None:
# all-reduce path
return symbolic_helper._handle_reduce_dim_none(g, self, onnx_op_name)
else:
keepdim = symbolic_helper._get_const(keepdim, "i", "keepdim")
return g.op(onnx_op_name, self, dim, keepdims_i=keepdim)
return symbolic
def _reduce_with_dtype(onnx_op, name):
symbolic = _reduce_op_symbolic(onnx_op)
@opset9.overload_by_arg_count
def reduce(g, *args, **kwargs):
@symbolic_helper.parse_args("v", "none")
def reduce_nodim(g, self, dtype):
if dtype.node().kind() == "onnx::Constant":
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
self = g.op(
"Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()
)
elif dtype.node().kind() != "prim::Constant":
return symbolic_helper._unimplemented(name, "dtype")
return symbolic(g, self)
@symbolic_helper.parse_args("v", "v", "i", "none")
def reduce_dim(g, self, dim, keepdim, dtype):
if dtype.node().kind() == "onnx::Constant":
dtype = symbolic_helper._get_const(dtype, "i", "dtype")
self = g.op(
"Cast", self, to_i=_type_utils.JitScalarType(dtype).onnx_type()
)
elif dtype.node().kind() != "prim::Constant":
return symbolic_helper._unimplemented(name, "dtype")
return symbolic(g, self, dim, keepdim)
return reduce_nodim, reduce_dim
return reduce
# TODO(justinchuby): Rename the op to avoid colliding with the builtin sum.
sum = _reduce_with_dtype("ReduceSum", "sum")
@symbolic_helper.parse_args("v", "i", "i", "i")
def unsafe_chunk(g, self, chunks, dim, _outputs=None):
if _outputs is None:
return g.op(
"SplitToSequence",
self,
g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)),
axis_i=dim,
keepdims_i=0,
)
size = symbolic_helper._get_tensor_dim_size(self, dim)
if size is None:
return symbolic_helper._unimplemented("unsafe_chunk", "unknown dimension size")
split_size = (size + chunks - 1) // chunks
splits = [split_size] * (size // split_size)
leftover = size % split_size
if leftover:
splits.append(leftover)
# TODO: So far we don"t have a module using this method. We"ll keep
# this as a constant unless we see a request of dynamics in any
# user's modules.
splits = g.op("Constant", value_t=torch.tensor(splits, dtype=torch.long))
return g.op("Split", self, splits, axis_i=dim, outputs=_outputs)
def repeat_interleave(g, self, repeats, dim=None, output_size=None):
input = self
final_dim = dim
# if dim is None flatten
# By default, use the flattened input array, and return a flat output array
if symbolic_helper._is_none(dim):
input = symbolic_helper._reshape_helper(
g, self, g.op("Constant", value_t=torch.tensor([-1]))
)
dim = 0
else:
dim = symbolic_helper._maybe_get_scalar(dim)
repeats_dim = symbolic_helper._get_tensor_rank(repeats)
repeats_sizes = symbolic_helper._get_tensor_sizes(repeats)
input_sizes = symbolic_helper._get_tensor_sizes(input)
if repeats_dim is None:
raise RuntimeError(
"Unsupported: ONNX export of repeat_interleave for unknown " "repeats rank."
)
if repeats_sizes is None:
raise RuntimeError(
"Unsupported: ONNX export of repeat_interleave for unknown " "repeats size."
)
if input_sizes is None:
raise RuntimeError(
"Unsupported: ONNX export of repeat_interleave for unknown " "input size."
)
# Handle cases where dim is negative
if dim < 0:
dim += len(input_sizes)
output_sizes = input_sizes.copy()
for idx, input_size in enumerate(input_sizes):
if input_size is None:
output_sizes[idx], input_sizes[idx] = 0, -1
cond_dynamic_repeats = repeats_dim == 1 and repeats_sizes[0] is None
# If input size is dynamic or repeats vector is dynamic
if output_sizes[dim] == 0 or cond_dynamic_repeats:
reps = symbolic_helper._size_helper(g, input, dim)
reps = opset11.unsqueeze(g, reps, 0)
# Check if repeats vector is a single integer value
# or a single dimension tensor with non-dynamic values
if repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1):
if not symbolic_helper._is_tensor(repeats):
repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
repeats = g.op("Expand", repeats, reps)
# Check if repeats is dynamic
# As repeats is dynamic, we use a where node as a substitute for the if statement
        # If repeats_dim == 1, expand repeats, otherwise use the original tensor
elif cond_dynamic_repeats:
repeat_dim = symbolic_helper._size_helper(
g, repeats, g.op("Constant", value_t=torch.LongTensor([0]))
)
repeat_cond = g.op(
"Equal", repeat_dim, g.op("Constant", value_t=torch.LongTensor([1]))
)
repeats = where(g, repeat_cond, g.op("Expand", repeats, reps), repeats)
    # There are cases where repeats is a 1-d tensor with multiple repeat values, while dim
    # points at one of the dynamic axes. A simple example would be
    # input.shape -> [1, 1, *] where * represents a dynamic axis, and dim = 2.
    # Repeat interleaving can then be performed in PyTorch when the value of * matches
    # the number of elements in repeats; for example, if * -> 2, the number of repeats
    # should be 2 as well.
else:
return opset9.repeat_interleave(g, self, repeats, final_dim)
reps_like = g.op(
"ConstantOfShape",
g.op("Shape", repeats),
value_t=torch.tensor([1], dtype=torch.long),
)
r_splits = split(g, repeats, reps_like, 0)
i_splits = split(g, input, reps_like, dim)
output_sizes[dim], input_sizes[dim] = -1, 1
# Create a loop to iterate over each value along the dimension
# and perform individual interleaving using the repeats tensor
# Loop is of the following pattern
# input (trip_count, cond)
# int trip_count = ...;
# bool cond = ...;
# for (int i=0; i < trip_count && cond; ++i) {
# cond = ...;
# }
# Loop conditions
loop_condition = g.op("Constant", value_t=torch.tensor(1))
loop_condition = g.op("Cast", loop_condition, to_i=9)
loop_len = reps
# Create an empty sequence to store final expansions
final_splits = g.op("SequenceEmpty")
loop = g.op("Loop", loop_len, loop_condition, final_splits)
# Loop inputs
loop_block = utils._add_block(loop.node())
block_input_iter = utils._add_input_to_block(loop_block)
cond = utils._add_input_to_block(loop_block)
final_splits = utils._add_input_to_block(loop_block)
r_split = loop_block.op("SequenceAt", r_splits, block_input_iter)
i_split = loop_block.op("SequenceAt", i_splits, block_input_iter)
i_split = opset11.unsqueeze(loop_block, i_split, dim + 1)
r_concat = [
loop_block.op("Constant", value_t=torch.LongTensor(input_sizes[: dim + 1])),
r_split,
loop_block.op("Constant", value_t=torch.LongTensor(input_sizes[dim + 1 :])),
]
r_concat = loop_block.op("Concat", *r_concat, axis_i=0)
i_split = opset9.expand(loop_block, i_split, r_concat, None)
i_split = symbolic_helper._reshape_helper(
loop_block, i_split, g.op("Constant", value_t=torch.LongTensor(output_sizes))
)
final_splits = loop_block.op("SequenceInsert", final_splits, i_split)
# Loop outputs
cond_out = loop_block.op("Cast", loop_condition, to_i=9)
utils._add_output_to_block(loop_block, cond_out)
utils._add_output_to_block(loop_block, final_splits)
loop_out = loop.node().output()
loop_out = g.op("ConcatFromSequence", loop_out, axis_i=dim)
return loop_out
@symbolic_helper.parse_args("v", "i", "i", "i")
def diagonal(g, self, offset, dim1, dim2):
dim1_size = opset9.size(
g, self, dim=g.op("Constant", value_t=torch.LongTensor([dim1]))
)
dim2_size = opset9.size(
g, self, dim=g.op("Constant", value_t=torch.LongTensor([dim2]))
)
# Create appropriate mask
mask_shape = g.op("Concat", dim1_size, dim2_size, axis_i=0)
mask = opset9.zeros(g, mask_shape, None, None, None)
mask = g.op("EyeLike", mask, k_i=offset)
# dim1 and dim2 appended as a dimension at the end of the shape
rank = symbolic_helper._get_tensor_rank(self)
if rank is not None:
axes = list(range(rank))
axes.remove(dim1)
axes.remove(dim2)
self = g.op("Transpose", self, perm_i=axes + [dim1, dim2])
else:
return symbolic_helper._unimplemented("diagonal", "unknown input rank")
# Multiply input and mask to calculate values along diagonal
# The mask consists of one values where diagonal values are to be calculated
# For example:
# [[1.1, 1.2, 1.3], * [[1, 0, 0] = [[1.1, 0, 0],
# [2.1, 2.2, 2.3], [0, 1, 0] [0, 2.2, 0],
# [3.1, 3.2, 3.3]] [0, 0, 1]] [0, 0, 3.3]]
result = g.op("Mul", self, mask)
result = symbolic_helper._reducesum_helper(g, result, axes_i=[-1], keepdims_i=0)
# Calculate gather indices based on offset and dims
# If offset is greater than zero, set offset to zero as this aids in
# calculation of selection window
offset_op = g.op("Constant", value_t=torch.LongTensor([offset]))
if offset >= 0:
diag_size = g.op(
"Max",
g.op("Min", dim1_size, g.op("Sub", dim2_size, offset_op)),
g.op("Constant", value_t=torch.LongTensor([0])),
)
offset = 0
else:
diag_size = g.op(
"Max",
g.op("Min", g.op("Add", dim1_size, offset_op), dim2_size),
g.op("Constant", value_t=torch.LongTensor([0])),
)
diag_size = g.op("Concat", diag_size, axis_i=0)
# Calculate which diagonal values to select
# For example, in cases with offsets:
# [[0, 1.1, 0]
# [0, 0, 2.2]]
# we need to select the last two columns, so we create a tensor
# with all columns that are to be selected
# So in this example, it is [1, 2]
select_window_ones_fill = opset9.ones(g, diag_size, 4, None, None)
select_window = g.op(
"CumSum",
select_window_ones_fill,
g.op("Constant", value_t=torch.LongTensor([0])),
)
select_window = g.op(
"Add",
select_window,
g.op("Constant", value_t=torch.LongTensor([abs(offset) - 1])),
)
gather_shape = [
opset9.size(g, result, dim=g.op("Constant", value_t=torch.LongTensor([axis])))
for axis in list(range(rank))[:-2]
]
gather_shape.append(diag_size)
gather_shape = g.op("Concat", *gather_shape, axis_i=0)
gather_indices = opset9.zeros(g, gather_shape, 4, None, None)
# There might be cases where offset value is greater than number of rows/columns
# and might cause the diagonal to overrun and as a result of this, diag_size would be zero.
# For example, if
# offset = 9, dim1_size = 2 (columns), dim2_size = 4 (rows)
# diag_size = max(min(2, (4-9)), 0) = 0, based on calculation above
# Cases with diagonal overrun always result in diag_size = max(0, -ve value) = 0
# In cases without diagonal overrun, we select the appropriate rows/columns along which we
# are calculating diagonal values. In cases with diagonal overrun, we return a tensor which has
# the dimension of the row/column where overrun occurred as 0-dim, as we are essentially
# returning an empty tensor
overrun_cond = g.op(
"Not",
g.op(
"Equal",
diag_size,
g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64)),
),
)
if_op = g.op("If", overrun_cond)
if_node = if_op.node()
if_block = utils._add_block(if_node)
gather_indices_if_block = if_block.op("Add", gather_indices, select_window)
gather_indices_if_block = symbolic_helper._unsqueeze_helper(
if_block, gather_indices_if_block, [rank - 1]
)
final_non_overrun_ = if_block.op(
"GatherND", result, gather_indices_if_block, batch_dims_i=rank - 2
)
utils._add_output_to_block(if_block, final_non_overrun_)
else_block = utils._add_block(if_node)
final_overrun_ = opset9.zeros(else_block, gather_shape, 6, None, None)
utils._add_output_to_block(else_block, final_overrun_)
return if_op
class Quantized:
"""
https://github.com/pytorch/pytorch/wiki/PyTorch-ONNX-exporter#quantized-model-export
"""
domain = "quantized"
@staticmethod
def linear(g, q_input, q_weight, bias, op_scale, op_zero_point):
input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
q_bias = symbolic_helper.requantize_bias_helper(
g, bias, input_scale, weight_scale, axis
)
bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)
output = opset9.linear(g, input, weight, bias)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
@staticmethod
def conv2d(
g,
q_input,
q_weight,
bias,
stride,
padding,
dilation,
groups,
op_scale,
op_zero_point,
):
input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
q_bias = symbolic_helper.requantize_bias_helper(
g, bias, input_scale, weight_scale, axis
)
bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)
output = opset9.conv2d(
g, input, weight, bias, stride, padding, dilation, groups
)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
@staticmethod
def conv2d_relu(
g,
q_input,
q_weight,
bias,
stride,
padding,
dilation,
groups,
op_scale,
op_zero_point,
):
input, input_scale, _, _ = symbolic_helper.dequantize_helper(g, q_input)
weight, weight_scale, _, axis = symbolic_helper.dequantize_helper(g, q_weight)
q_bias = symbolic_helper.requantize_bias_helper(
g, bias, input_scale, weight_scale, axis
)
bias, _, _, _ = symbolic_helper.dequantize_helper(g, q_bias)
output = opset9.conv2d(
g, input, weight, bias, stride, padding, dilation, groups
)
output = opset9.relu(g, output)
return symbolic_helper.quantize_helper(g, output, op_scale, op_zero_point)
| pytorch-master | torch/onnx/symbolic_opset13.py |
r"""This file provides a location for operators that help exporting models via onnx.
E.g. `shape_as_tensor` and `reshape_from_tensor_shape`
exist to make all dynamic-size operations traceable.
NOTE: at one point these functions were implemented differently.
Since then we have implemented these directly in ATen, so this
file is kept purely for backward-compatibility.
"""
import torch
import torch.onnx
def shape_as_tensor(x):
return torch._shape_as_tensor(x)
def reshape_from_tensor_shape(x, shape):
return torch._reshape_from_tensor(x, shape)
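# A short usage sketch (shapes are illustrative):
#
#     x = torch.randn(2, 3, 4)
#     shape = shape_as_tensor(x)                        # tensor([2, 3, 4])
#     y = reshape_from_tensor_shape(torch.randn(24), shape)
#     # y now has shape (2, 3, 4); the reshape stays traceable because the
#     # target shape is carried as a tensor rather than a Python-side constant.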
| pytorch-master | torch/onnx/operators.py |
"""This file exports ONNX ops for opset 16.
Note [ONNX Operators that are added/updated in opset 16]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
https://github.com/onnx/onnx/blob/main/docs/Changelog.md#version-16-of-the-default-onnx-operator-set
New operators:
GridSample https://github.com/onnx/onnx/pull/3557
Updated operators:
Identity
If
LeakyRelu
Loop
PRelu
RoiAlign
Scan
    ScatterElements
ScatterND
Where
GreaterOrEqual
LessOrEqual
SequenceMap
"""
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
from torch.nn.functional import (
GRID_SAMPLE_INTERPOLATION_MODES,
GRID_SAMPLE_PADDING_MODES,
)
from torch.onnx import _type_utils, symbolic_helper
# note (mkozuki): Why `grid_sampler` instead of `grid_sample`?
# Because `torch.nn.functional.grid_sample` calls `torch.grid_sampler`.
@symbolic_helper.parse_args("v", "v", "i", "i", "b")
def grid_sampler(g, input, grid, mode_enum, padding_mode_enum, align_corners):
mode_s = {v: k for k, v in GRID_SAMPLE_INTERPOLATION_MODES.items()}[mode_enum] # type: ignore[call-arg]
padding_mode_s = {v: k for k, v in GRID_SAMPLE_PADDING_MODES.items()}[padding_mode_enum] # type: ignore[call-arg]
return g.op(
"GridSample",
input,
grid,
align_corners_i=int(align_corners),
mode_s=mode_s,
padding_mode_s=padding_mode_s,
)
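# For instance, assuming the standard torch.nn.functional enum mappings,
# mode_enum=0, padding_mode_enum=0, and align_corners=False translate to a GridSample
# node with mode_s="bilinear", padding_mode_s="zeros", and align_corners_i=0.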
@symbolic_helper.parse_args("v", "i", "v", "v")
def scatter_add(g, self, dim, index, src):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("scatter", self, dim, index, src, overload_name="src")
src_type = src.type().scalarType()
src_sizes = symbolic_helper._get_tensor_sizes(src)
index_sizes = symbolic_helper._get_tensor_sizes(index)
if src_sizes != index_sizes:
return symbolic_helper._unimplemented(
"scatter_add",
f"`index` ({index_sizes}) should have the same dimensionality as `src` ({src_sizes})",
)
src = symbolic_helper._maybe_get_scalar(src)
if symbolic_helper._is_value(src):
return g.op("ScatterElements", self, index, src, axis_i=dim, reduction_s="add")
else:
# Check if scalar "src" has same type as self (PyTorch allows different
# type for scalar src (but not when src is tensor)). If not, insert Cast node.
if self.type().scalarType() != src_type:
src = g.op(
"Cast",
src,
to_i=_type_utils.JitScalarType.from_name(
self.type().scalarType()
).onnx_type(),
)
return g.op(
"ScatterElements",
self,
index,
src,
axis_i=dim,
reduction_s="add",
)
| pytorch-master | torch/onnx/symbolic_opset16.py |
import sys
from typing import Optional, Tuple
import torch
from torch._C import _onnx as _C_onnx
from torch.onnx import _type_utils, symbolic_helper, symbolic_opset9 as opset9, utils
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
# This file exports ONNX ops for opset 12
__all__ = [
"argmax",
"argmin",
"binary_cross_entropy_with_logits",
"celu",
"cross_entropy_loss",
"dropout",
"einsum",
"ge",
"le",
"native_dropout",
"nll_loss",
"nll_loss2d",
"nll_loss_nd",
"outer",
"pow",
"tensordot",
"unfold",
]
def _einsum_helper(g, equation, tensors):
if not tensors:
raise RuntimeError("Einsum inputs are empty.")
# ONNX does not support bool for Einsum inputs.
if tensors[0].type().scalarType() == "Bool":
tensors = [
g.op("Cast", tensor, to_i=_C_onnx.TensorProtoDataType.INT64)
for tensor in tensors
]
return g.op(
"Cast",
g.op("Einsum", *tensors, equation_s=equation),
to_i=_C_onnx.TensorProtoDataType.BOOL,
)
else:
return g.op("Einsum", *tensors, equation_s=equation)
@symbolic_helper.parse_args("s", "v")
def einsum(g, equation, tensor_list):
tensors = symbolic_helper._unpack_list(tensor_list)
return _einsum_helper(g, equation, tensors)
@symbolic_helper.parse_args("v", "v")
def outer(g, input, other):
# make sure to cast other to self's type
if other.type().scalarType() != input.type().scalarType():
other = g.op(
"Cast",
other,
to_i=_type_utils.JitScalarType.from_name(
input.type().scalarType()
).onnx_type(),
)
return _einsum_helper(g, "i,j->ij", [input, other])
def _dropout_returns_masked_input_and_mask(
g, input: torch._C.Value, p: float, train: bool
) -> Tuple[torch._C.Value, Optional[torch._C.Value]]:
symbolic_helper.check_training_mode(train, "dropout")
    # In eval mode, dropout is a no-op. That is, if the node's
# train param is set to False, dropout just returns its inputs.
if not train:
return input, None
p = g.op("Constant", value_t=torch.tensor(p))
t = g.op("Constant", value_t=torch.tensor(train, dtype=torch.bool))
r, mask = g.op("Dropout", input, p, t, outputs=2)
return r, mask
@symbolic_helper.parse_args("v", "f", "i")
def dropout(g, input, p, train):
masked, _ = _dropout_returns_masked_input_and_mask(g, input, p, train)
return masked
@symbolic_helper.parse_args("v", "f", "i")
def native_dropout(g, input, p, train):
return _dropout_returns_masked_input_and_mask(g, input, p, train)
def nll_loss(g, self, target, weight, reduction, ignore_index):
# none reduction : onnx::Constant[value={0}]
# mean reduction : onnx::Constant[value={1}]
# sum reduction : onnx::Constant[value={2}]
reduction = symbolic_helper._maybe_get_const(reduction, "i")
reduction_vals = ["none", "mean", "sum"]
reduction = reduction_vals[reduction]
    # In the ONNX NegativeLogLikelihoodLoss specification, ignore_index is optional and has no default value.
    # Therefore we need to set the ignore_index attribute even if it is not specified (e.g. ignore_index=-100).
ignore_index = symbolic_helper._maybe_get_const(ignore_index, "i")
if weight.node().mustBeNone():
nllloss = g.op(
"NegativeLogLikelihoodLoss",
self,
target,
reduction_s=reduction,
ignore_index_i=ignore_index,
)
else:
nllloss = g.op(
"NegativeLogLikelihoodLoss",
self,
target,
weight,
reduction_s=reduction,
ignore_index_i=ignore_index,
)
return nllloss
def nll_loss2d(g, self, target, weight, reduction, ignore_index):
return nll_loss(g, self, target, weight, reduction, ignore_index)
def nll_loss_nd(g, self, target, weight, reduction, ignore_index):
return nll_loss(g, self, target, weight, reduction, ignore_index)
def cross_entropy_loss(
g, self, target, weight, reduction, ignore_index, label_smoothing
):
# none reduction : onnx::Constant[value={0}]
# mean reduction : onnx::Constant[value={1}]
# sum reduction : onnx::Constant[value={2}]
reduction = symbolic_helper._maybe_get_const(reduction, "i")
reduction_vals = ["none", "mean", "sum"]
reduction = reduction_vals[reduction]
label_smoothing = symbolic_helper._maybe_get_const(label_smoothing, "f")
if label_smoothing > 0.0:
raise RuntimeError("Unsupported: ONNX does not support label_smoothing")
    # In the ONNX SoftmaxCrossEntropyLoss specification, ignore_index is optional and has no default value.
    # Therefore we need to set the ignore_index attribute even if it is not specified (e.g. ignore_index=-100).
ignore_index = symbolic_helper._maybe_get_const(ignore_index, "i")
if weight.node().mustBeNone():
celoss = g.op(
"SoftmaxCrossEntropyLoss",
self,
target,
reduction_s=reduction,
ignore_index_i=ignore_index,
)
else:
celoss = g.op(
"SoftmaxCrossEntropyLoss",
self,
target,
weight,
reduction_s=reduction,
ignore_index_i=ignore_index,
)
return celoss
@symbolic_helper.parse_args("v", "v", "v", "v", "i")
def binary_cross_entropy_with_logits(g, input, target, weight, pos_weight, reduction):
p = g.op("Constant", value_t=torch.tensor([1]))
sig_x = opset9.sigmoid(g, input)
log_sig_x = opset9.log(g, sig_x)
sub_1_x = opset9.sub(g, p, sig_x)
sub_1_y = opset9.sub(g, p, target)
log_1_x = opset9.log(g, sub_1_x)
if pos_weight is None or symbolic_helper._is_none(pos_weight):
output = opset9.neg(
g,
opset9.add(
g, opset9.mul(g, target, log_sig_x), opset9.mul(g, sub_1_y, log_1_x)
),
)
else:
output = opset9.neg(
g,
opset9.add(
g,
opset9.mul(g, opset9.mul(g, target, log_sig_x), pos_weight),
opset9.mul(g, sub_1_y, log_1_x),
),
)
if weight is not None and not symbolic_helper._is_none(weight):
output = opset9.mul(g, weight, output)
reduction = symbolic_helper._maybe_get_const(reduction, "i")
if reduction == 0:
return output
elif reduction == 1:
return g.op("ReduceMean", output, keepdims_i=0)
elif reduction == 2:
return g.op("ReduceSum", output, keepdims_i=0)
else:
return symbolic_helper._onnx_unsupported(
"binary_cross_entropy_with_logits with reduction other than none, mean, or sum"
)
def celu(g, self, alpha):
alpha = symbolic_helper._maybe_get_const(alpha, "f")
# if the input is of type double cast it to float
if self.type().scalarType() == "Double":
self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT)
out = g.op("Celu", self, alpha_f=alpha)
return g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.DOUBLE)
return g.op("Celu", self, alpha_f=alpha)
@symbolic_helper.parse_args("v", "v", "i")
def argmax(g, input: torch._C.Value, dim: torch._C.Value, keepdim: int):
return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMax")
@symbolic_helper.parse_args("v", "v", "i")
def argmin(g, input: torch._C.Value, dim: torch._C.Value, keepdim: int):
return symbolic_helper._argmin_argmax_helper(g, input, dim, keepdim, "ArgMin")
def pow(g, self, exponent):
return g.op("Pow", self, exponent)
def ge(g, input, other):
return g.op("GreaterOrEqual", input, other)
def le(g, input, other):
return g.op("LessOrEqual", input, other)
@symbolic_helper.parse_args("v", "i", "v", "v")
def unfold(g, input, dimension, size, step):
const_size = symbolic_helper._maybe_get_const(size, "i")
const_step = symbolic_helper._maybe_get_const(step, "i")
if not symbolic_helper._is_value(const_size) and not symbolic_helper._is_value(
const_step
):
return opset9.unfold(g, input, dimension, const_size, const_step)
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("unfold", input, dimension_i=dimension, size_i=size, step_i=step)
sizedim = symbolic_helper._get_tensor_dim_size(input, dimension)
if sizedim is not None:
low_start = g.op("Constant", value_t=torch.tensor(0))
low_end = g.op("Constant", value_t=torch.tensor(sizedim))
hi_end = g.op("Constant", value_t=torch.tensor(sizedim + 1))
low_indices = g.op("Range", low_start, low_end, step)
hi_indices = g.op("Range", size, hi_end, step)
low_size = symbolic_helper._size_helper(
g, low_indices, g.op("Constant", value_t=torch.tensor(0))
)
hi_size = symbolic_helper._size_helper(
g, hi_indices, g.op("Constant", value_t=torch.tensor(0))
)
ndim = symbolic_helper._get_tensor_rank(input)
assert ndim is not None
perm = list(range(0, ndim))
perm.append(perm.pop(dimension))
unsqueeze_list = []
loop_condition = g.op("Constant", value_t=torch.tensor(1))
loop_condition = g.op("Cast", loop_condition, to_i=9)
loop_len = g.op("Min", low_size, hi_size)
loop = g.op("Loop", loop_len, loop_condition)
loop_block = utils._add_block(loop.node())
block_input_iter = utils._add_input_to_block(loop_block)
cond = utils._add_input_to_block(loop_block)
starts = loop_block.op("Gather", low_indices, block_input_iter)
ends = loop_block.op("Gather", hi_indices, block_input_iter)
axes = loop_block.op("Constant", value_t=torch.tensor([2]))
starts = symbolic_helper._unsqueeze_helper(loop_block, starts, [0])
ends = symbolic_helper._unsqueeze_helper(loop_block, ends, [0])
stack = loop_block.op("Slice", input, starts, ends, axes)
unsqueeze = symbolic_helper._unsqueeze_helper(
loop_block, loop_block.op("Transpose", stack, perm_i=perm), [dimension]
)
unsqueeze_list.append(unsqueeze)
concat = loop_block.op("Concat", *unsqueeze_list, axis_i=0)
cond_out = loop_block.op("Cast", loop_condition, to_i=9)
utils._add_output_to_block(loop_block, cond_out)
utils._add_output_to_block(loop_block, concat)
loop_output = loop.node().output()
perm = [0, 1, 2, 3, 4]
perm[0], perm[dimension + 1] = perm[dimension + 1], perm[0]
transpose = g.op("Transpose", loop_output, perm_i=perm)
squeeze = symbolic_helper._squeeze_helper(g, transpose, [0])
return squeeze
else:
return symbolic_helper._unimplemented("Unfold", "input size not accessible")
@symbolic_helper.parse_args("v", "v", "is", "is", "v")
def tensordot(g, input_a, input_b, dims_a, dims_b, out=None):
if out is not None:
symbolic_helper._unimplemented(
"Tensordot", "Out parameter is not supported for tensordot."
)
dim_count_a = symbolic_helper._get_tensor_rank(input_a)
if dim_count_a is None:
raise RuntimeError(
"Unsupported: ONNX export of tensordot for tensor(input_a) of unknown rank."
)
dim_count_b = symbolic_helper._get_tensor_rank(input_b)
if dim_count_b is None:
raise RuntimeError(
"Unsupported: ONNX export of tensordot for tensor(input_b) of unknown rank."
)
dims_a = [
(dims_a[i] + dim_count_a) if (dims_a[i] < 0) else dims_a[i]
for i in range(len(dims_a))
]
dims_b = [
(dims_b[i] + dim_count_b) if (dims_b[i] < 0) else dims_b[i]
for i in range(len(dims_b))
]
left_dims_a = [i for i in range(dim_count_a) if (i not in dims_a)]
left_dims_b = [i for i in range(dim_count_b) if (i not in dims_b)]
new_input_a = opset9.permute(g, input_a, left_dims_a + dims_a)
new_input_b = opset9.permute(g, input_b, dims_b + left_dims_b)
input_shape = g.op("Shape", new_input_a)
left_sizes_a = symbolic_helper._slice_helper(
g, input_shape, axes=[0], starts=[0], ends=[len(left_dims_a)]
)
shape_sizes = [
left_sizes_a,
g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
]
output_a = opset9._reshape_from_tensor(g, new_input_a, shape_sizes)
input_shape = g.op("Shape", output_a)
slices = symbolic_helper._slice_helper(
g, input_shape, axes=[0], starts=[-1], ends=[sys.maxsize]
)
shape_sizes = [
g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
slices,
]
output_a = opset9._reshape_from_tensor(g, new_input_a, shape_sizes)
input_shape = g.op("Shape", new_input_b)
left_sizes_b = symbolic_helper._slice_helper(
g, input_shape, axes=[0], starts=[len(dims_b)], ends=[sys.maxsize]
)
slices = symbolic_helper._slice_helper(
g, input_shape, axes=[0], starts=[0], ends=[len(dims_b)]
)
shape_sizes = [
slices,
g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
]
output_b = opset9._reshape_from_tensor(g, new_input_b, shape_sizes)
input_shape = g.op("Shape", output_b)
slices = symbolic_helper._slice_helper(
g, input_shape, axes=[0], starts=[-1], ends=[sys.maxsize]
)
shape_sizes = [
g.op("Constant", value_t=torch.tensor([-1], dtype=torch.long)),
slices,
]
output_b = opset9._reshape_from_tensor(g, new_input_b, shape_sizes)
output = einsum(g, "ij,jk->ik", g.op("prim::ListConstruct", *[output_a, output_b]))
shape_sizes = [left_sizes_a, left_sizes_b]
return opset9._reshape_from_tensor(g, output, shape_sizes)
| pytorch-master | torch/onnx/symbolic_opset12.py |
from . import amp
| pytorch-master | torch/cpu/__init__.py |
import torch
from typing import Any
class autocast(torch.amp.autocast_mode.autocast):
r"""
See :class:`torch.autocast`.
``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("cpu", args...)``
"""
def __init__(self, enabled : bool = True, dtype : torch.dtype = torch.bfloat16, cache_enabled : bool = True):
if torch._jit_internal.is_scripting():
self._enabled = enabled
self.device = "cpu"
self.fast_dtype = dtype
return
super().__init__("cpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)
def __enter__(self):
if torch._jit_internal.is_scripting():
return self
return super().__enter__()
# TODO: discuss a unified TorchScript-friendly API for autocast
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override]
if torch._jit_internal.is_scripting():
return
return super().__exit__(exc_type, exc_val, exc_tb)
def __call__(self, func):
if torch._jit_internal.is_scripting():
return func
return super().__call__(func)
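# A short usage sketch (the module and input are illustrative):
#
#     model = torch.nn.Linear(8, 8)
#     x = torch.randn(4, 8)
#     with torch.cpu.amp.autocast():
#         y = model(x)
#     # matmul-heavy ops such as Linear typically run in bfloat16 inside the region,
#     # while dtypes outside the context manager are unaffected.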
| pytorch-master | torch/cpu/amp/autocast_mode.py |
from .autocast_mode import autocast
| pytorch-master | torch/cpu/amp/__init__.py |
try:
from urllib.parse import urlparse, urlunparse
except ImportError:
raise ImportError(
"urllib cannot be found, urlparse from python2 is no longer supported."
)
import numbers
import os
import sys
from datetime import timedelta
from typing import Dict, Optional
import torch._six as six
from torch.distributed import FileStore, PrefixStore, Store, TCPStore
from .constants import default_pg_timeout
_rendezvous_handlers = {}
def register_rendezvous_handler(scheme, handler):
"""Registers a new rendezvous handler.
Before we can run collective algorithms, participating processes
need to find each other and exchange information to be able to
communicate. We call this process rendezvous.
The outcome of the rendezvous process is a triplet containing a
shared key/value store, the rank of the process, and the total
number of participating processes.
If none of the bundled rendezvous methods apply to your execution
environment you can opt to register your own rendezvous handler.
Pick a unique name and use the URL scheme to identify it when
calling the `rendezvous()` function.
Args:
scheme (str): URL scheme to identify your rendezvous handler.
handler (function): Handler that is invoked when the
`rendezvous()` function is called with a URL that uses
the corresponding scheme. It must be a generator function
that yields the triplet.
"""
global _rendezvous_handlers
if scheme in _rendezvous_handlers:
raise RuntimeError(
"Rendezvous handler for {}:// already registered".format(scheme)
)
_rendezvous_handlers[scheme] = handler
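# A minimal sketch of a custom handler (the "myscheme" name and the FileStore
# backing are illustrative assumptions). Handlers are generator functions that
# yield the (store, rank, world_size) triplet:
#
#     def _my_rendezvous_handler(url, **kwargs):
#         result = urlparse(url)
#         query_dict = _query_to_dict(result.query)
#         rank = int(query_dict["rank"])
#         world_size = int(query_dict["world_size"])
#         store = FileStore("/tmp/my_rendezvous", world_size)
#         yield (store, rank, world_size)
#         raise RuntimeError("Unable to perform re-rendezvous using myscheme:// method")
#
#     register_rendezvous_handler("myscheme", _my_rendezvous_handler)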
# Query will have format "rank=0&world_size=1" and is
# converted into {"rank": "0", "world_size": "1"} (values remain strings until cast by the caller)
def _query_to_dict(query: str) -> Dict[str, str]:
return dict((pair[0], pair[1]) for pair in (pair.split("=") for pair in filter(None, query.split("&"))))
def _rendezvous_helper(url: str, rank: int, world_size_opt: Optional[int], **kwargs):
result = urlparse(url)
if world_size_opt is None:
world_size = -1
if result.scheme == "env":
rank = int(os.environ.get("RANK", rank))
# If the world_size env variable is not present then it is a dynamic group
world_size = int(os.environ.get("WORLD_SIZE", world_size))
else:
world_size = world_size_opt
if rank != -1 or world_size != -1 or world_size_opt is None:
query_dict = _query_to_dict(result.query)
assert (
"rank" not in query_dict and "world_size" not in query_dict
), "The url: {url} has node-specific arguments(rank, world_size) already.".format(
url=url
)
if rank != -1:
query_dict["rank"] = str(rank)
if world_size != -1 or world_size_opt is None:
query_dict["world_size"] = str(world_size)
result = result._replace(
query="{}".format(
"&".join(["{}={}".format(k, v) for k, v in query_dict.items()])
)
)
url = urlunparse(result)
if result.scheme not in _rendezvous_handlers:
raise RuntimeError("No rendezvous handler for {}://".format(result.scheme))
return _rendezvous_handlers[result.scheme](url, **kwargs)
def rendezvous(url: str, rank: int = -1, world_size: int = -1, **kwargs):
if not isinstance(url, six.string_classes):
raise RuntimeError("`url` must be a string. {}: {}".format(type(url), url))
if not isinstance(rank, numbers.Integral):
raise RuntimeError("`rank` must be an integer. {}".format(rank))
if not isinstance(world_size, numbers.Integral):
raise RuntimeError("`world_size` must be an integer. {}".format(world_size))
return _rendezvous_helper(url, rank, world_size, **kwargs)
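# A minimal usage sketch (address, rank, and world size are illustrative):
#
#     store, rank, world_size = next(rendezvous("tcp://127.0.0.1:29500", rank=0, world_size=1))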
def _create_store_from_options(backend_options, rank):
store, _, _ = next(_rendezvous_helper(backend_options.init_method, rank, None))
return store
def _rendezvous_error(msg):
return ValueError("Error initializing torch.distributed using " + msg)
def _file_rendezvous_handler(url: str, **kwargs):
def _error(msg):
return _rendezvous_error("file:// rendezvous: " + msg)
result = urlparse(url)
path = result.path
if sys.platform == "win32":
import urllib.request
full_path = result.netloc + result.path
path = urllib.request.url2pathname(full_path)
if path:
# Normalizing an empty string produces ".", which is not expected.
path = os.path.normpath(path)
if not path:
raise _error("path missing")
query_dict = _query_to_dict(result.query)
if "rank" not in query_dict:
raise _error("rank parameter missing")
if "world_size" not in query_dict:
raise _error("world size parameter missing")
rank = int(query_dict["rank"])
world_size = int(query_dict["world_size"])
store = FileStore(path, world_size)
yield (store, rank, world_size)
# If this configuration is invalidated, there is nothing we can do about it
raise RuntimeError("Unable to perform rerendezvous using file:// method")
def _torchelastic_use_agent_store() -> bool:
return os.environ.get("TORCHELASTIC_USE_AGENT_STORE", None) == str(True)
def _create_c10d_store(hostname, port, rank, world_size, timeout) -> Store:
"""
    Smartly creates a c10d Store object on ``rank`` based on whether
    we need to re-use the agent store. The TCPStore server is assumed to be hosted
on ``hostname:port``.
If ``torchelastic_use_agent_store()`` is ``True``, then it is assumed that
the agent leader (node rank 0) hosts the TCPStore server (for which the
endpoint is specified by the given ``hostname:port``). Hence
ALL ranks will create and return a TCPStore client (e.g. ``start_daemon=False``).
If ``torchelastic_use_agent_store()`` is ``False``, then rank 0 will host
the TCPStore (with multi-tenancy) and it is assumed that rank 0's hostname
and port are correctly passed via ``hostname`` and ``port``. All
non-zero ranks will create and return a TCPStore client.
"""
# check if port is uint16_t
if not 0 <= port < 2**16:
raise ValueError(f"port must have value from 0 to 65535 but was {port}.")
if _torchelastic_use_agent_store():
attempt = os.environ["TORCHELASTIC_RESTART_COUNT"]
tcp_store = TCPStore(hostname, port, world_size, False, timeout)
return PrefixStore(f"/worker/attempt_{attempt}", tcp_store)
else:
start_daemon = rank == 0
return TCPStore(
hostname, port, world_size, start_daemon, timeout, multi_tenant=True
)
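# A short sketch of the two paths described above (hostname and port are illustrative):
#
#     # Agent store not in use: rank 0 hosts the TCPStore server, other ranks connect as clients.
#     store0 = _create_c10d_store("node0", 29400, rank=0, world_size=2, timeout=default_pg_timeout)
#     store1 = _create_c10d_store("node0", 29400, rank=1, world_size=2, timeout=default_pg_timeout)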
def _tcp_rendezvous_handler(
url: str, timeout: timedelta = default_pg_timeout, **kwargs
):
def _error(msg):
return _rendezvous_error("tcp:// rendezvous: " + msg)
result = urlparse(url)
if not result.port:
raise _error("port number missing")
query_dict = _query_to_dict(result.query)
if "rank" not in query_dict:
raise _error("rank parameter missing")
if "world_size" not in query_dict:
raise _error("world size parameter missing")
rank = int(query_dict["rank"])
world_size = int(query_dict["world_size"])
assert result.hostname is not None
store = _create_c10d_store(result.hostname, result.port, rank, world_size, timeout)
yield (store, rank, world_size)
# If this configuration is invalidated, there is nothing we can do about it
raise RuntimeError("Unable to perform re-rendezvous using tcp:// method")
def _env_rendezvous_handler(
url: str, timeout: timedelta = default_pg_timeout, **kwargs
):
def _error(msg):
return _rendezvous_error("env:// rendezvous: " + msg)
def _env_error(var):
return _error("environment variable %s expected, but not set" % var)
def _get_env_or_raise(env_var: str) -> str:
env_val = os.environ.get(env_var, None)
if not env_val:
raise _env_error(env_var)
else:
return env_val
result = urlparse(url)
query_dict = _query_to_dict(result.query)
rank: int
world_size: int
master_port: int
master_addr: str
if "rank" in query_dict:
rank = int(query_dict["rank"])
else:
rank = int(_get_env_or_raise("RANK"))
if "world_size" in query_dict:
world_size = int(query_dict["world_size"])
else:
world_size = int(_get_env_or_raise("WORLD_SIZE"))
master_addr = _get_env_or_raise("MASTER_ADDR")
master_port = int(_get_env_or_raise("MASTER_PORT"))
store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout)
yield (store, rank, world_size)
# If this configuration is invalidated, there is nothing we can do about it
raise RuntimeError("Unable to perform re-rendezvous using env:// method")
register_rendezvous_handler("tcp", _tcp_rendezvous_handler)
register_rendezvous_handler("env", _env_rendezvous_handler)
register_rendezvous_handler("file", _file_rendezvous_handler)
| pytorch-master | torch/distributed/rendezvous.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
``torchrun`` provides a superset of the functionality of ``torch.distributed.launch``
with the following additional functionalities:
1. Worker failures are handled gracefully by restarting all workers.
2. Worker ``RANK`` and ``WORLD_SIZE`` are assigned automatically.
3. Number of nodes is allowed to change between minimum and maximum sizes (elasticity).
.. note:: ``torchrun`` is a python
`console script <https://packaging.python.org/en/latest/specifications/entry-points/#use-for-scripts>`_
to the main module
`torch.distributed.run <https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py>`_
declared in the ``entry_points`` configuration in
`setup.py <https://github.com/pytorch/pytorch/blob/master/setup.py>`_.
It is equivalent to invoking ``python -m torch.distributed.run``.
Transitioning from torch.distributed.launch to torchrun
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``torchrun`` supports the same arguments as ``torch.distributed.launch`` **except**
for ``--use_env`` which is now deprecated. To migrate from ``torch.distributed.launch``
to ``torchrun`` follow these steps:
1. If your training script is already reading ``local_rank`` from the ``LOCAL_RANK`` environment variable,
   then you simply need to omit the ``--use_env`` flag, e.g.:
+--------------------------------------------------------------------+--------------------------------------------+
| ``torch.distributed.launch`` | ``torchrun`` |
+====================================================================+============================================+
| | |
| .. code-block:: shell-session | .. code-block:: shell-session |
| | |
| $ python -m torch.distributed.launch --use_env train_script.py | $ torchrun train_script.py |
| | |
+--------------------------------------------------------------------+--------------------------------------------+
2. If your training script reads local rank from a ``--local_rank`` cmd argument.
Change your training script to read from the ``LOCAL_RANK`` environment variable as
demonstrated by the following code snippet:
+-------------------------------------------------------+----------------------------------------------------+
| ``torch.distributed.launch`` | ``torchrun`` |
+=======================================================+====================================================+
| | |
| .. code-block:: python | .. code-block:: python |
| | |
| | |
| import argparse | import os |
| parser = argparse.ArgumentParser() | local_rank = int(os.environ["LOCAL_RANK"]) |
| parser.add_argument("--local_rank", type=int) | |
| args = parser.parse_args() | |
| | |
| local_rank = args.local_rank | |
| | |
+-------------------------------------------------------+----------------------------------------------------+
The aforementioned changes suffice to migrate from ``torch.distributed.launch`` to ``torchrun``.
To take advantage of new features such as elasticity, fault-tolerance, and error reporting of ``torchrun``
please refer to:
* :ref:`elastic_train_script` for more information on authoring training scripts that are ``torchrun`` compliant.
* the rest of this page for more information on the features of ``torchrun``.
Usage
--------
Single-node multi-worker
++++++++++++++++++++++++++++++
::
torchrun
--standalone
--nnodes=1
--nproc_per_node=$NUM_TRAINERS
YOUR_TRAINING_SCRIPT.py (--arg1 ... train script args...)
Stacked single-node multi-worker
+++++++++++++++++++++++++++++++++++
To run multiple instances (separate jobs) of single-node, multi-worker on the
same host, we need to make sure that each instance (job) is
set up on different ports to avoid port conflicts (or worse, two jobs being merged
as a single job). To do this you have to run with ``--rdzv_backend=c10d``
and specify a different port by setting ``--rdzv_endpoint=localhost:$PORT_k``.
For ``--nnodes=1``, it's often convenient to let ``torchrun`` pick a free random
port automatically instead of manually assigning different ports for each run.
::
torchrun
--rdzv_backend=c10d
--rdzv_endpoint=localhost:0
--nnodes=1
--nproc_per_node=$NUM_TRAINERS
YOUR_TRAINING_SCRIPT.py (--arg1 ... train script args...)
Fault tolerant (fixed number of workers, no elasticity, tolerates 3 failures)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
::
torchrun
--nnodes=$NUM_NODES
--nproc_per_node=$NUM_TRAINERS
--max_restarts=3
--rdzv_id=$JOB_ID
--rdzv_backend=c10d
--rdzv_endpoint=$HOST_NODE_ADDR
YOUR_TRAINING_SCRIPT.py (--arg1 ... train script args...)
``HOST_NODE_ADDR``, in form <host>[:<port>] (e.g. node1.example.com:29400), specifies the node and
the port on which the C10d rendezvous backend should be instantiated and hosted. It can be any
node in your training cluster, but ideally you should pick a node with high bandwidth.
.. note::
If no port number is specified ``HOST_NODE_ADDR`` defaults to 29400.
Elastic (``min=1``, ``max=4``, tolerates up to 3 membership changes or failures)
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
::
torchrun
--nnodes=1:4
--nproc_per_node=$NUM_TRAINERS
--max_restarts=3
--rdzv_id=$JOB_ID
--rdzv_backend=c10d
--rdzv_endpoint=$HOST_NODE_ADDR
YOUR_TRAINING_SCRIPT.py (--arg1 ... train script args...)
``HOST_NODE_ADDR``, in form <host>[:<port>] (e.g. node1.example.com:29400), specifies the node and
the port on which the C10d rendezvous backend should be instantiated and hosted. It can be any
node in your training cluster, but ideally you should pick a node with high bandwidth.
.. note::
If no port number is specified ``HOST_NODE_ADDR`` defaults to 29400.
Note on rendezvous backend
------------------------------
For multi-node training you need to specify:
1. ``--rdzv_id``: A unique job id (shared by all nodes participating in the job)
2. ``--rdzv_backend``: An implementation of
:py:class:`torch.distributed.elastic.rendezvous.RendezvousHandler`
3. ``--rdzv_endpoint``: The endpoint where the rendezvous backend is running; usually in form
``host:port``.
Currently ``c10d`` (recommended), ``etcd-v2``, and ``etcd`` (legacy) rendezvous backends are
supported out of the box. To use ``etcd-v2`` or ``etcd``, setup an etcd server with the ``v2`` api
enabled (e.g. ``--enable-v2``).
.. warning::
``etcd-v2`` and ``etcd`` rendezvous use etcd API v2. You MUST enable the v2 API on the etcd
server. Our tests use etcd v3.4.3.
.. warning::
For etcd-based rendezvous we recommend using ``etcd-v2`` over ``etcd`` which is functionally
equivalent, but uses a revised implementation. ``etcd`` is in maintenance mode and will be
removed in a future version.
Definitions
--------------
1. ``Node`` - A physical instance or a container; maps to the unit that the job manager works with.
2. ``Worker`` - A worker in the context of distributed training.
3. ``WorkerGroup`` - The set of workers that execute the same function (e.g. trainers).
4. ``LocalWorkerGroup`` - A subset of the workers in the worker group running on the same node.
5. ``RANK`` - The rank of the worker within a worker group.
6. ``WORLD_SIZE`` - The total number of workers in a worker group.
7. ``LOCAL_RANK`` - The rank of the worker within a local worker group.
8. ``LOCAL_WORLD_SIZE`` - The size of the local worker group.
9. ``rdzv_id`` - A user-defined id that uniquely identifies the worker group for a job. This id is
used by each node to join as a member of a particular worker group.
10. ``rdzv_backend`` - The backend of the rendezvous (e.g. ``c10d``). This is typically a strongly
consistent key-value store.
11. ``rdzv_endpoint`` - The rendezvous backend endpoint; usually in form ``<host>:<port>``.
A ``Node`` runs ``LOCAL_WORLD_SIZE`` workers which comprise a ``LocalWorkerGroup``. The union of
all ``LocalWorkerGroups`` in the nodes in the job comprise the ``WorkerGroup``.
Environment Variables
----------------------
The following environment variables are made available to you in your script:
1. ``LOCAL_RANK`` - The local rank.
2. ``RANK`` - The global rank.
3. ``GROUP_RANK`` - The rank of the worker group. A number between 0 and ``max_nnodes``. When
running a single worker group per node, this is the rank of the node.
4. ``ROLE_RANK`` - The rank of the worker across all the workers that have the same role. The role
of the worker is specified in the ``WorkerSpec``.
5. ``LOCAL_WORLD_SIZE`` - The local world size (e.g. number of workers running locally); equals to
``--nproc_per_node`` specified on ``torchrun``.
6. ``WORLD_SIZE`` - The world size (total number of workers in the job).
7. ``ROLE_WORLD_SIZE`` - The total number of workers that were launched with the same role specified
in ``WorkerSpec``.
8. ``MASTER_ADDR`` - The FQDN of the host that is running worker with rank 0; used to initialize
the Torch Distributed backend.
9. ``MASTER_PORT`` - The port on the ``MASTER_ADDR`` that can be used to host the C10d TCP store.
10. ``TORCHELASTIC_RESTART_COUNT`` - The number of worker group restarts so far.
11. ``TORCHELASTIC_MAX_RESTARTS`` - The configured maximum number of restarts.
12. ``TORCHELASTIC_RUN_ID`` - Equal to the rendezvous ``run_id`` (e.g. unique job id).
13. ``PYTHON_EXEC`` - System executable override. If provided, the python user script will
use the value of ``PYTHON_EXEC`` as executable. The `sys.executable` is used by default.
Deployment
------------
1. (Not needed for the C10d backend) Start the rendezvous backend server and get the endpoint (to be
passed as ``--rdzv_endpoint`` to the launcher script)
2. Single-node multi-worker: Start the launcher on the host to start the agent process which
creates and monitors a local worker group.
3. Multi-node multi-worker: Start the launcher with the same arguments on all the nodes
participating in training.
When using a job/cluster manager the entry point command to the multi-node job should be this
launcher.
Failure Modes
---------------
1. Worker failure: For a training job with ``n`` workers, if ``k<=n`` workers fail all workers
are stopped and restarted up to ``max_restarts``.
2. Agent failure: An agent failure results in a local worker group failure. It is up to the job
manager to fail the entire job (gang semantics) or attempt to replace the node. Both behaviors
are supported by the agent.
3. Node failure: Same as agent failure.
Membership Changes
--------------------
1. Node departure (scale-down): The agent is notified of the departure, all existing workers are
stopped, a new ``WorkerGroup`` is formed, and all workers are started with a new ``RANK`` and
``WORLD_SIZE``.
2. Node arrival (scale-up): The new node is admitted to the job, all existing workers are stopped,
a new ``WorkerGroup`` is formed, and all workers are started with a new ``RANK`` and
``WORLD_SIZE``.
Important Notices
--------------------
1. This utility and multi-process distributed (single-node or
multi-node) GPU training currently only achieve the best performance using
the NCCL distributed backend. Thus the NCCL backend is the recommended backend to
use for GPU training.
2. The environment variables necessary to initialize a Torch process group are provided to you by
this module; there is no need for you to pass ``RANK`` manually. To initialize a process group in your
training script, simply run:
::
>>> # xdoctest: +SKIP("stub")
>>> import torch.distributed as dist
>>> dist.init_process_group(backend="gloo|nccl")
3. In your training program, you can either use regular distributed functions
or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
training program uses GPUs for training and you would like to use
:func:`torch.nn.parallel.DistributedDataParallel` module,
here is how to configure it.
::
local_rank = int(os.environ["LOCAL_RANK"])
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[local_rank],
output_device=local_rank)
Please ensure that the ``device_ids`` argument is set to be the only GPU device id
that your code will be operating on. This is generally the local rank of the
process. In other words, ``device_ids`` needs to be ``[int(os.environ["LOCAL_RANK"])]``,
and ``output_device`` needs to be ``int(os.environ["LOCAL_RANK"])`` in order to use this
utility.
4. On failures or membership changes ALL surviving workers are killed immediately. Make sure to
checkpoint your progress. The frequency of checkpoints should depend on your job's tolerance
for lost work.
5. This module only supports homogeneous ``LOCAL_WORLD_SIZE``. That is, it is assumed that all
nodes run the same number of local workers (per role).
6. ``RANK`` is NOT stable. Between restarts, the local workers on a node can be assigned a
different range of ranks than before. NEVER hard code any assumptions about the stability of
ranks or any correlation between ``RANK`` and ``LOCAL_RANK``.
7. When using elasticity (``min_size!=max_size``) DO NOT hard code assumptions about
``WORLD_SIZE`` as the world size can change as nodes are allowed to leave and join.
8. It is recommended for your script to have the following structure:
::
def main():
load_checkpoint(checkpoint_path)
initialize()
train()
def train():
for batch in iter(dataset):
train_step(batch)
if should_checkpoint:
save_checkpoint(checkpoint_path)
9. (Recommended) On worker errors, this tool will summarize the details of the error
(e.g. time, rank, host, pid, traceback, etc). On each node, the first error (by timestamp)
is heuristically reported as the "Root Cause" error. To get tracebacks as part of this
error summary print out, you must decorate your main entrypoint function in your
training script as shown in the example below. If not decorated, then the summary
will not include the traceback of the exception and will only contain the exitcode.
For details on torchelastic error handling see: https://pytorch.org/docs/stable/elastic/errors.html
::
from torch.distributed.elastic.multiprocessing.errors import record
@record
def main():
# do train
pass
if __name__ == "__main__":
main()
"""
import logging
import os
import sys
import uuid
from argparse import REMAINDER, ArgumentParser
from typing import Callable, List, Tuple, Union
import torch
from torch.distributed.argparse_util import check_env, env
from torch.distributed.elastic.multiprocessing import Std
from torch.distributed.elastic.multiprocessing.errors import record
from torch.distributed.elastic.rendezvous.utils import _parse_rendezvous_config
from torch.distributed.elastic.utils import macros
from torch.distributed.elastic.utils.logging import get_logger
from torch.distributed.launcher.api import LaunchConfig, elastic_launch
log = get_logger()
def get_args_parser() -> ArgumentParser:
"""Helper function parsing the command line options."""
parser = ArgumentParser(description="Torch Distributed Elastic Training Launcher")
#
# Worker/node size related arguments.
#
parser.add_argument(
"--nnodes",
action=env,
type=str,
default="1:1",
help="Number of nodes, or the range of nodes in form <minimum_nodes>:<maximum_nodes>.",
)
parser.add_argument(
"--nproc_per_node",
action=env,
type=str,
default="1",
help="Number of workers per node; supported values: [auto, cpu, gpu, int].",
)
#
# Rendezvous related arguments
#
parser.add_argument(
"--rdzv_backend",
action=env,
type=str,
default="static",
help="Rendezvous backend.",
)
parser.add_argument(
"--rdzv_endpoint",
action=env,
type=str,
default="",
help="Rendezvous backend endpoint; usually in form <host>:<port>.",
)
parser.add_argument(
"--rdzv_id",
action=env,
type=str,
default="none",
help="User-defined group id.",
)
parser.add_argument(
"--rdzv_conf",
action=env,
type=str,
default="",
help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).",
)
parser.add_argument(
"--standalone",
action=check_env,
help="Start a local standalone rendezvous backend that is represented by a C10d TCP store "
"on port 29400. Useful when launching single-node, multi-worker job. If specified "
"--rdzv_backend, --rdzv_endpoint, --rdzv_id are auto-assigned; any explicitly set values "
"are ignored.",
)
#
# User-code launch related arguments.
#
parser.add_argument(
"--max_restarts",
action=env,
type=int,
default=0,
help="Maximum number of worker group restarts before failing.",
)
parser.add_argument(
"--monitor_interval",
action=env,
type=float,
default=5,
help="Interval, in seconds, to monitor the state of workers.",
)
parser.add_argument(
"--start_method",
action=env,
type=str,
default="spawn",
choices=["spawn", "fork", "forkserver"],
help="Multiprocessing start method to use when creating workers.",
)
parser.add_argument(
"--role",
action=env,
type=str,
default="default",
help="User-defined role for the workers.",
)
parser.add_argument(
"-m",
"--module",
action=check_env,
help="Change each process to interpret the launch script as a Python module, executing "
"with the same behavior as 'python -m'.",
)
parser.add_argument(
"--no_python",
action=check_env,
help="Skip prepending the training script with 'python' - just execute it directly. Useful "
"when the script is not a Python script.",
)
parser.add_argument(
"--run_path",
action=check_env,
help="Run the training script with runpy.run_path in the same interpreter."
" Script must be provided as an abs path (e.g. /abs/path/script.py)."
" Takes precedence over --no_python.",
)
parser.add_argument(
"--log_dir",
action=env,
type=str,
default=None,
help="Base directory to use for log files (e.g. /var/log/torch/elastic). The same "
"directory is re-used for multiple runs (a unique job-level sub-directory is created with "
"rdzv_id as the prefix).",
)
parser.add_argument(
"-r",
"--redirects",
action=env,
type=str,
default="0",
help="Redirect std streams into a log file in the log directory (e.g. [-r 3] redirects "
"both stdout+stderr for all workers, [-r 0:1,1:2] redirects stdout for local rank 0 and "
"stderr for local rank 1).",
)
parser.add_argument(
"-t",
"--tee",
action=env,
type=str,
default="0",
help="Tee std streams into a log file and also to console (see --redirects for format).",
)
#
# Backwards compatible parameters with caffe2.distributed.launch.
#
parser.add_argument(
"--node_rank",
type=int,
action=env,
default=0,
help="Rank of the node for multi-node distributed training.",
)
parser.add_argument(
"--master_addr",
default="127.0.0.1",
type=str,
action=env,
help="Address of the master node (rank 0). It should be either the IP address or the "
"hostname of rank 0. For single node multi-proc training the --master_addr can simply be "
"127.0.0.1; IPv6 should have the pattern `[0:0:0:0:0:0:0:1]`.",
)
parser.add_argument(
"--master_port",
default=29500,
type=int,
action=env,
help="Port on the master node (rank 0) to be used for communication during distributed "
"training.",
)
#
# Positional arguments.
#
parser.add_argument(
"training_script",
type=str,
help="Full path to the (single GPU) training program/script to be launched in parallel, "
"followed by all the arguments for the training script.",
)
# Rest from the training program.
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser
def parse_args(args):
parser = get_args_parser()
return parser.parse_args(args)
def parse_min_max_nnodes(nnodes: str):
arr = nnodes.split(":")
if len(arr) == 1:
min_nodes = max_nodes = int(arr[0])
elif len(arr) == 2:
min_nodes = int(arr[0])
max_nodes = int(arr[1])
else:
raise RuntimeError(f'nnodes={nnodes} is not in "MIN:MAX" format')
return min_nodes, max_nodes
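# Illustrative behaviour of the helper above (values are arbitrary):
#   parse_min_max_nnodes("4")     -> (4, 4)   # fixed-size job
#   parse_min_max_nnodes("1:8")   -> (1, 8)   # elastic job
#   parse_min_max_nnodes("1:2:3") -> raises RuntimeError (not in "MIN:MAX" format)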
def determine_local_world_size(nproc_per_node: str):
try:
logging.info(f"Using nproc_per_node={nproc_per_node}.")
return int(nproc_per_node)
except ValueError:
if nproc_per_node == "cpu":
num_proc = os.cpu_count()
device_type = "cpu"
elif nproc_per_node == "gpu":
if not torch.cuda.is_available():
raise ValueError("Cuda is not available.")
device_type = "gpu"
num_proc = torch.cuda.device_count()
elif nproc_per_node == "auto":
if torch.cuda.is_available():
num_proc = torch.cuda.device_count()
device_type = "gpu"
else:
num_proc = os.cpu_count()
device_type = "cpu"
else:
raise ValueError(f"Unsupported nproc_per_node value: {nproc_per_node}")
log.info(
f"Using nproc_per_node={nproc_per_node},"
f" seting to {num_proc} since the instance "
f"has {os.cpu_count()} {device_type}"
)
return num_proc
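# Illustrative outcomes of the helper above (results depend on the host):
#   determine_local_world_size("4")    -> 4
#   determine_local_world_size("gpu")  -> torch.cuda.device_count()
#   determine_local_world_size("cpu")  -> os.cpu_count()
#   determine_local_world_size("auto") -> GPU count if CUDA is available, otherwise CPU count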
def get_rdzv_endpoint(args):
if args.rdzv_backend == "static" and not args.rdzv_endpoint:
return f"{args.master_addr}:{args.master_port}"
return args.rdzv_endpoint
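# Illustrative behaviour of the helper above (hypothetical parsed args):
#   rdzv_backend="static", rdzv_endpoint=""            -> "<master_addr>:<master_port>"
#   rdzv_backend="c10d",   rdzv_endpoint="node1:29400" -> "node1:29400"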
def get_use_env(args) -> bool:
"""
Retrieves ``use_env`` from the args.
``use_env`` is a legacy argument, if ``use_env`` is False, the
``--node_rank`` argument will be transferred to all worker processes.
``use_env`` is only used by the ``torch.distributed.launch`` and will
be deprecated in future releases.
"""
if not hasattr(args, "use_env"):
return True
return args.use_env
def config_from_args(args) -> Tuple[LaunchConfig, Union[Callable, str], List[str]]:
# If ``args`` not passed, defaults to ``sys.argv[:1]``
min_nodes, max_nodes = parse_min_max_nnodes(args.nnodes)
assert 0 < min_nodes <= max_nodes
assert args.max_restarts >= 0
nproc_per_node = determine_local_world_size(args.nproc_per_node)
if "OMP_NUM_THREADS" not in os.environ and nproc_per_node > 1:
omp_num_threads = 1
log.warning(
f"\n*****************************************\n"
f"Setting OMP_NUM_THREADS environment variable for each process to be "
f"{omp_num_threads} in default, to avoid your system being overloaded, "
f"please further tune the variable for optimal performance in "
f"your application as needed. \n"
f"*****************************************"
)
# This env variable will be passed down to the subprocesses
os.environ["OMP_NUM_THREADS"] = str(omp_num_threads)
rdzv_configs = _parse_rendezvous_config(args.rdzv_conf)
if args.rdzv_backend == "static":
rdzv_configs["rank"] = args.node_rank
rdzv_endpoint = get_rdzv_endpoint(args)
config = LaunchConfig(
min_nodes=min_nodes,
max_nodes=max_nodes,
nproc_per_node=nproc_per_node,
run_id=args.rdzv_id,
role=args.role,
rdzv_endpoint=rdzv_endpoint,
rdzv_backend=args.rdzv_backend,
rdzv_configs=rdzv_configs,
max_restarts=args.max_restarts,
monitor_interval=args.monitor_interval,
start_method=args.start_method,
redirects=Std.from_str(args.redirects),
tee=Std.from_str(args.tee),
log_dir=args.log_dir,
)
with_python = not args.no_python
cmd: Union[Callable, str]
cmd_args = []
use_env = get_use_env(args)
if args.run_path:
cmd = run_script_path
cmd_args.append(args.training_script)
else:
if with_python:
cmd = os.getenv("PYTHON_EXEC", sys.executable)
cmd_args.append("-u")
if args.module:
cmd_args.append("-m")
cmd_args.append(args.training_script)
else:
if args.module:
raise ValueError(
"Don't use both the '--no_python' flag"
" and the '--module' flag at the same time."
)
cmd = args.training_script
if not use_env:
cmd_args.append(f"--local_rank={macros.local_rank}")
cmd_args.extend(args.training_script_args)
return config, cmd, cmd_args
def run_script_path(training_script: str, *training_script_args: str):
"""
Runs the provided `training_script` from within this interpreter.
Usage: `run_script_path("/abs/path/to/script.py", "--arg1", "val1")`
"""
import runpy
import sys
sys.argv = [training_script] + [*training_script_args]
runpy.run_path(sys.argv[0], run_name="__main__")
def run(args):
if args.standalone:
args.rdzv_backend = "c10d"
args.rdzv_endpoint = "localhost:29400"
args.rdzv_id = str(uuid.uuid4())
log.info(
f"\n**************************************\n"
f"Rendezvous info:\n"
f"--rdzv_backend={args.rdzv_backend} "
f"--rdzv_endpoint={args.rdzv_endpoint} "
f"--rdzv_id={args.rdzv_id}\n"
f"**************************************\n"
)
config, cmd, cmd_args = config_from_args(args)
elastic_launch(
config=config,
entrypoint=cmd,
)(*cmd_args)
@record
def main(args=None):
args = parse_args(args)
run(args)
if __name__ == "__main__":
main()
| pytorch-master | torch/distributed/run.py |
import contextlib
import io
import logging
import os
import pickle
import time
import warnings
from datetime import timedelta
from typing import Callable, Dict, Optional, Tuple, Union
import torch
from torch._C._distributed_c10d import (
AllreduceCoalescedOptions,
AllreduceOptions,
AllToAllOptions,
BarrierOptions,
BroadcastOptions,
GatherOptions,
PrefixStore,
ProcessGroup,
ReduceOp,
ReduceOptions,
ReduceScatterOptions,
ScatterOptions,
Store,
DebugLevel,
get_debug_level,
)
from torch._six import string_classes
from .constants import default_pg_timeout
from .rendezvous import register_rendezvous_handler, rendezvous # noqa: F401
# This module is wildcard imported from torch.distributed.
# TODO: specify __all__
_MPI_AVAILABLE = True
_NCCL_AVAILABLE = True
_GLOO_AVAILABLE = True
_UCC_AVAILABLE = True
_pickler = pickle.Pickler
_unpickler = pickle.Unpickler
try:
from torch._C._distributed_c10d import ProcessGroupMPI
except ImportError:
_MPI_AVAILABLE = False
try:
from torch._C._distributed_c10d import ProcessGroupNCCL
except ImportError:
_NCCL_AVAILABLE = False
try:
from torch._C._distributed_c10d import ProcessGroupGloo
from torch._C._distributed_c10d import _ProcessGroupWrapper
except ImportError:
_GLOO_AVAILABLE = False
try:
from torch._C._distributed_c10d import ProcessGroupUCC
ProcessGroupUCC.__module__ = "torch.distributed.distributed_c10d"
except ImportError:
_UCC_AVAILABLE = False
logger = logging.getLogger(__name__)
PG_WRAPPER_STORE_PREFIX = "pg_wrapper"
# Some reduce ops are not supported by complex numbers and will result in an error.
# We currently provide complex support to the distributed API by viewing
# complex tensors as real (torch.view_as_real), meaning that calling
# these unsupported ops will return garbage values rather than error out.
# (e.g. max(2+3i, 3+2i) = 3+3i)
# We'd like calls to unsupported ops to error out accordingly,
# rather than returning garbage values.
def supports_complex(reduceOp: ReduceOp) -> bool:
denyList = [
ReduceOp.MAX,
ReduceOp.MIN,
ReduceOp.PRODUCT,
ReduceOp.BAND,
ReduceOp.BOR,
ReduceOp.BXOR,
]
return reduceOp not in denyList
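# Illustrative use of the check above (not executed at import time):
#   supports_complex(ReduceOp.SUM) -> True   (sum-style reductions view complex tensors as real)
#   supports_complex(ReduceOp.MAX) -> False  (max/min/product/bitwise ops are denied)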
class Backend(object):
"""
An enum-like class of available backends: GLOO, NCCL, UCC, MPI, and other registered
backends.
The values of this class are lowercase strings, e.g., ``"gloo"``. They can
be accessed as attributes, e.g., ``Backend.NCCL``.
This class can be directly called to parse the string, e.g.,
``Backend(backend_str)`` will check if ``backend_str`` is valid, and
return the parsed lowercase string if so. It also accepts uppercase strings,
e.g., ``Backend("GLOO")`` returns ``"gloo"``.
.. note:: The entry ``Backend.UNDEFINED`` is present but only used as
initial value of some fields. Users should neither use it directly
nor assume its existence.
"""
UNDEFINED = "undefined"
GLOO = "gloo"
NCCL = "nccl"
UCC = "ucc"
MPI = "mpi"
TCP = "tcp"
_plugins: Dict[str, Callable] = {}
def __new__(cls, name: str):
if not isinstance(name, string_classes):
raise ValueError("Backend name must be a string, but got: {}".format(name))
value = getattr(Backend, name.upper(), Backend.UNDEFINED)
if value == Backend.TCP:
raise ValueError(
"TCP backend has been deprecated. Please use "
"Gloo or MPI backend for collective operations "
"on CPU tensors."
)
elif value == Backend.UNDEFINED:
raise ValueError("Invalid backend: '{}'".format(name))
elif value != Backend.GLOO and value != Backend.NCCL and value != Backend.UCC and value != Backend.MPI:
value = name.lower()
return value
@classmethod
def register_backend(cls, name, func):
"""
Registers a new backend with the given name and instantiating function.
This class method is used by 3rd party ``ProcessGroup`` extension to
register new backends.
Args:
name (str): Backend name of the ``ProcessGroup`` extension. It
should match the one in ``init_process_group()``.
func (function): Function handler that instantiates the backend.
The function should be implemented in the backend
extension and takes four arguments, including
``store``, ``rank``, ``world_size``, and ``timeout``.
.. note:: This support of 3rd party backend is experimental and subject to change.
"""
# Allow UCC plugin if PyTorch is not built with native support.
# TODO: remove this exception once UCC plugin is fully deprecated.
if (name != Backend.UCC or (name == Backend.UCC and is_ucc_available())):
assert not hasattr(Backend, name.upper()), (
f"{name.upper()} c10d backend already exist"
)
assert name.upper() not in Backend._plugins, (
f"{name.upper()} c10d backend creator function already exist"
)
setattr(Backend, name.upper(), name.upper())
Backend._plugins[name.upper()] = func
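# A minimal sketch (hypothetical "dummy" backend and creator function, not part of this module)
# of how a third-party extension would use ``Backend.register_backend``; the creator must accept
# (store, rank, world_size, timeout):
#
#   def _create_dummy_pg(store, rank, world_size, timeout):
#       ...  # construct and return a ProcessGroup-compatible object
#   Backend.register_backend("dummy", _create_dummy_pg)
#   # afterwards: init_process_group(backend="dummy", ...)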
# `_backend`, `dist_backend`, and `reduce_op` are here to maintain backward
# compatibility with pre-c10d distributed package.
# TODO: remove them when users are ready to take a hard dependency on PyTorch 1.
_backend: str = Backend.UNDEFINED
dist_backend = Backend
class _reduce_op(object):
r"""
Deprecated enum-like class for reduction operations: ``SUM``, ``PRODUCT``,
``MIN``, and ``MAX``.
:class:`~torch.distributed.ReduceOp` is recommended to use instead.
"""
def __init__(self):
# __members__ is a dict storing key-value pairs for enum classes
for k, v in ReduceOp.__members__.items():
setattr(self, k, v)
self.__members__ = ReduceOp.__members__
def __getattribute__(self, key):
warnings.warn(
"torch.distributed.reduce_op is deprecated, please use "
"torch.distributed.ReduceOp instead"
)
return object.__getattribute__(self, key)
reduce_op = _reduce_op()
class group(object):
# Points to the default PG once initialized.
WORLD: Optional[ProcessGroup] = None
class GroupMember(object):
# Alias to group.WORLD for backward compatibility
WORLD = group.WORLD
NON_GROUP_MEMBER = object()
# Cached process groups
# For NCCL and GLOO pg, it is a map from ProcessGroup to (Backend, Store)
# For MPI pg, it is a map from ProcessGroup to (Backend, None)
_pg_map: Dict[ProcessGroup, Tuple[str, Optional[Store]]] = {}
# Process group's names, map from ProcessGroup to str
_pg_names: Dict[ProcessGroup, str] = {}
# Process group's global rank to local rank mapping
_pg_group_ranks: Dict[ProcessGroup, Dict[int, int]] = {}
# Default process group state
_default_pg_init_method = None
# Process group count for default naming
_group_count = 0
STORE_BASED_BARRIER_PREFIX = "store_based_barrier_key"
def _get_pg_device(group: ProcessGroup):
"""
Returns the device to use with ``group``.
This is CUDA for NCCL and CPU for everything else.
"""
if _check_for_nccl_backend(group):
return torch.device("cuda", torch.cuda.current_device())
return torch.device("cpu")
def _store_based_barrier(rank, store, timeout):
"""
Barrier based on store which is used for synchronizing processes after
``init_process_group`` or ``new_group``. Intended to be used only with
those two methods and is not a generic alternative to ``barrier()``.
"""
store_key = "{}:{}".format(STORE_BASED_BARRIER_PREFIX, _group_count)
store.add(store_key, 1)
logger.info("Added key: {} to store for rank: {}".format(store_key, rank))
# Now wait for all workers to check in with the store.
world_size = get_world_size()
# Use 'add' instead of 'get' since for some store implementations 'add'
# doesn't work well with 'get'. Ideally the store implementations should
# be fixed, but for backward compatibility reasons it is risky to change
# the store implementations. Once we completely migrate away from these
# legacy stores, we can use 'get' here instead.
worker_count = store.add(store_key, 0)
start = time.time()
log_time = time.time()
while worker_count != world_size:
time.sleep(0.01)
worker_count = store.add(store_key, 0)
# Print status periodically to keep track.
if timedelta(seconds=(time.time() - log_time)) > timedelta(seconds=10):
logger.info(
"Waiting in store based barrier to initialize process group for "
"rank: {}, key: {} (world_size={}, worker_count={}, timeout={})".format(
rank, store_key, world_size, worker_count, timeout
)
)
log_time = time.time()
if timedelta(seconds=(time.time() - start)) > timeout:
raise RuntimeError(
"Timed out initializing process group in store based barrier on "
"rank: {}, for key: {} (world_size={}, worker_count={}, timeout={})".format(
rank, store_key, world_size, worker_count, timeout
)
)
logger.info(
f"Rank {rank}: Completed store-based barrier for key:{store_key} with {world_size} nodes."
)
def _rank_not_in_group(group: ProcessGroup):
"""
Helper that checks if the current process's rank is not in a given group.
"""
if group is None:
return False
return group == GroupMember.NON_GROUP_MEMBER
def _warn_not_in_group(op_name):
global_rank = -1 if GroupMember.WORLD is None else GroupMember.WORLD.rank()
warnings.warn(
f"Running {op_name} on global rank {global_rank} which does not "
"belong to the given group."
)
def _get_group_rank(group: ProcessGroup, rank):
"""
Helper that gets a given group's local rank in the group from a given global
rank.
"""
if group is GroupMember.WORLD:
raise RuntimeError(
"group.WORLD does not have local rank to global " "rank mapping"
)
if group not in _pg_group_ranks:
raise RuntimeError("The given group does not exist")
try:
group_rank = _pg_group_ranks[group][rank]
except KeyError:
raise RuntimeError(
f"The global rank {rank} is not part of the group {group}"
) from None
return group_rank
def _get_global_rank(group, group_rank):
"""
Helper that gets a given group's global rank from a given local rank in the
group.
"""
if group is GroupMember.WORLD:
raise RuntimeError(
"group.WORLD does not have local rank to global " "rank mapping"
)
group_rank_map = _pg_group_ranks[group]
for rank, grp_rank in group_rank_map.items():
if grp_rank == group_rank:
return rank
raise RuntimeError("The group rank is not part of the group")
def _get_group_size(group):
"""
Helper that gets a given group's world size.
"""
if group is GroupMember.WORLD or group is None:
default_pg = _get_default_group()
return default_pg.size()
return group.size()
def _check_single_tensor(param, param_name):
"""
Helper to check that the parameter ``param_name`` is a single tensor.
"""
if not isinstance(param, torch.Tensor):
raise RuntimeError(
"Invalid function argument. Expected parameter `{}` "
"to be of type torch.Tensor.".format(param_name)
)
def _check_tensor_list(param, param_name):
"""
Helper to check that the parameter ``param_name`` is a list of tensors.
"""
if not isinstance(param, list) or not all(
isinstance(p, torch.Tensor) for p in param
):
raise RuntimeError(
"Invalid function argument. Expected parameter `{}` "
"to be of type List[torch.Tensor].".format(param_name)
)
def _check_op(op):
"""
Helper to check that the ``op`` is either isend or irecv.
"""
if op not in [isend, irecv]:
raise RuntimeError(
"Invalid ``op``. Expected ``op`` "
"to be of type ``torch.distributed.isend`` or "
"``torch.distributed.irecv``."
)
def _check_p2p_op_list(p2p_op_list):
"""
Helper to check that the ``p2p_op_list`` is a list of P2POp instances and
all ops use the same group.
"""
if not isinstance(p2p_op_list, list) or not all(
isinstance(p2p_op, P2POp) for p2p_op in p2p_op_list
):
raise RuntimeError(
"Invalid ``p2p_op_list``. Each op is expected to "
"to be of type ``torch.distributed.P2POp``."
)
group = p2p_op_list[0].group
if not all(group == p2p_op.group for p2p_op in p2p_op_list):
raise RuntimeError("All ops need to use the same group.")
def is_mpi_available():
"""
Checks if the MPI backend is available.
"""
return _MPI_AVAILABLE
def is_nccl_available():
"""
Checks if the NCCL backend is available.
"""
return _NCCL_AVAILABLE
def is_gloo_available():
"""
Checks if the Gloo backend is available.
"""
return _GLOO_AVAILABLE
def is_ucc_available():
"""
Checks if the UCC backend is available.
"""
return _UCC_AVAILABLE
def is_initialized():
"""
Checks if the default process group has been initialized.
"""
return GroupMember.WORLD is not None
def is_torchelastic_launched():
"""
Checks whether this process was launched with ``torch.distributed.elastic``
(aka torchelastic). The existence of ``TORCHELASTIC_RUN_ID`` environment
variable is used as a proxy to determine whether the current process
was launched with torchelastic. This is a reasonable proxy since
``TORCHELASTIC_RUN_ID`` maps to the rendezvous id which is always a
non-null value indicating the job id for peer discovery purposes.
"""
return os.getenv("TORCHELASTIC_RUN_ID") is not None
def _get_default_group():
"""
Gets the default process group created by init_process_group.
"""
if not is_initialized():
raise RuntimeError(
"Default process group has not been initialized, "
"please make sure to call init_process_group."
)
return GroupMember.WORLD
def _get_default_store():
"""
Gets the default store created by init_process_group.
"""
if not is_initialized():
raise RuntimeError(
"Default process group has not been initialized, "
"please make sure to call init_process_group."
)
default_pg = _get_default_group()
_, default_store = _pg_map[default_pg]
return default_store
def _update_default_pg(pg):
GroupMember.WORLD = group.WORLD = pg
def get_backend(group=None):
"""
Returns the backend of the given process group.
Args:
group (ProcessGroup, optional): The process group to work on. The
default is the general main process group. If another specific group
is specified, the calling process must be part of :attr:`group`.
Returns:
The backend of the given process group as a lower case string.
"""
if group is None:
pg = _get_default_group()
else:
pg = group
if _rank_not_in_group(pg):
raise RuntimeError("Invalid process group specified")
pg_store = _pg_map.get(pg, None)
assert pg_store is not None
return pg_store[0]
def init_process_group(
backend,
init_method=None,
timeout=default_pg_timeout,
world_size=-1,
rank=-1,
store=None,
group_name="",
pg_options=None,
):
"""
Initializes the default distributed process group, and this will also
initialize the distributed package.
There are 2 main ways to initialize a process group:
1. Specify ``store``, ``rank``, and ``world_size`` explicitly.
2. Specify ``init_method`` (a URL string) which indicates where/how
to discover peers. Optionally specify ``rank`` and ``world_size``,
or encode all required parameters in the URL and omit them.
If neither is specified, ``init_method`` is assumed to be "env://".
Args:
backend (str or Backend): The backend to use. Depending on
build-time configurations, valid values include ``mpi``, ``gloo``,
``nccl``, and ``ucc``. This field should be given as a lowercase
string (e.g., ``"gloo"``), which can also be accessed via
:class:`Backend` attributes (e.g., ``Backend.GLOO``). If using
multiple processes per machine with ``nccl`` backend, each process
must have exclusive access to every GPU it uses, as sharing GPUs
between processes can result in deadlocks. ``ucc`` backend is
experimental.
init_method (str, optional): URL specifying how to initialize the
process group. Default is "env://" if no
``init_method`` or ``store`` is specified.
Mutually exclusive with ``store``.
world_size (int, optional): Number of processes participating in
the job. Required if ``store`` is specified.
rank (int, optional): Rank of the current process (it should be a
number between 0 and ``world_size``-1).
Required if ``store`` is specified.
store(Store, optional): Key/value store accessible to all workers, used
to exchange connection/address information.
Mutually exclusive with ``init_method``.
timeout (timedelta, optional): Timeout for operations executed against
the process group. Default value equals 30 minutes.
This is applicable for the ``gloo`` backend. For ``nccl``, this is
applicable only if the environment variable ``NCCL_BLOCKING_WAIT``
or ``NCCL_ASYNC_ERROR_HANDLING`` is set to 1. When
``NCCL_BLOCKING_WAIT`` is set, this is the duration for which the
process will block and wait for collectives to complete before
throwing an exception. When ``NCCL_ASYNC_ERROR_HANDLING`` is set,
this is the duration after which collectives will be aborted
asynchronously and the process will crash. ``NCCL_BLOCKING_WAIT``
will provide errors to the user which can be caught and handled,
but due to its blocking nature, it has a performance overhead. On
the other hand, ``NCCL_ASYNC_ERROR_HANDLING`` has very little
performance overhead, but crashes the process on errors. This is
done since CUDA execution is async and it is no longer safe to
continue executing user code since failed async NCCL operations
might result in subsequent CUDA operations running on corrupted
data. Only one of these two environment variables should be set.
For ``ucc``, blocking wait is supported similar to NCCL. However,
async error handling is done differently since with UCC we have
progress thread and not watch-dog thread.
group_name (str, optional, deprecated): Group name.
pg_options (ProcessGroupOptions, optional): process group options
specifying what additional options need to be passed in during
the construction of specific process groups. As of now, the only
option we support is ``ProcessGroupNCCL.Options`` for the ``nccl``
backend; ``is_high_priority_stream`` can be specified so that
the nccl backend can pick up high priority cuda streams when
there are compute kernels waiting.
.. note:: To enable ``backend == Backend.MPI``, PyTorch needs to be built from source
on a system that supports MPI.
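Example (illustrative only; the address, port, and sizes below are placeholder values):
>>> # xdoctest: +SKIP("no rank")
>>> # Option 1: explicit store together with rank and world_size.
>>> store = dist.TCPStore("127.0.0.1", 29500, 2, is_master=(rank == 0))
>>> dist.init_process_group("gloo", store=store, rank=rank, world_size=2)
>>> # Option 2: an init_method URL describing how peers discover each other.
>>> dist.init_process_group("gloo", init_method="tcp://127.0.0.1:29500", rank=rank, world_size=2)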
"""
global _pg_group_ranks
global _backend
global _default_pg_init_method
if not isinstance(timeout, timedelta):
raise RuntimeError(
"Expected timeout argument to be of type" "datetime.timedelta"
)
if GroupMember.WORLD is not None:
raise RuntimeError("trying to initialize the default process group " "twice!")
assert (store is None) or (
init_method is None
), "Cannot specify both init_method and store."
if store is not None:
assert world_size > 0, "world_size must be positive if using store"
assert rank >= 0, "rank must be non-negative if using store"
elif init_method is None:
init_method = "env://"
backend = Backend(backend)
if backend == Backend.MPI:
if world_size != -1 or rank != -1:
warnings.warn(
"For MPI backend, world_size ({}) and rank ({}) "
"are ignored since they are assigned by the "
"MPI runtime.".format(world_size, rank)
)
default_pg = _new_process_group_helper(
-1, -1, [], Backend.MPI, None, group_name=group_name, timeout=timeout
)
_update_default_pg(default_pg)
else:
# backward compatible API
if store is None:
rendezvous_iterator = rendezvous(
init_method, rank, world_size, timeout=timeout
)
store, rank, world_size = next(rendezvous_iterator)
store.set_timeout(timeout)
# Use a PrefixStore to avoid accidental overrides of keys used by
# different systems (e.g. RPC) in case the store is multi-tenant.
store = PrefixStore("default_pg", store)
default_pg = _new_process_group_helper(
world_size,
rank,
[],
backend,
store,
pg_options=pg_options,
group_name=group_name,
timeout=timeout,
)
_update_default_pg(default_pg)
_pg_group_ranks[GroupMember.WORLD] = {i: i for i in range(GroupMember.WORLD.size())} # type: ignore[attr-defined, index]
_backend = _pg_map[GroupMember.WORLD][0] # type: ignore[index]
_default_pg_init_method = init_method
# barrier at the end to ensure that once we return from this method, all
# process groups including global variables are updated correctly on all
# ranks.
if backend == Backend.MPI:
# MPI backend doesn't use store.
barrier()
else:
# Use store based barrier here since barrier() used a bunch of
# default devices and messes up NCCL internal state.
_store_based_barrier(rank, store, timeout)
# Set sequence numbers for gloo and nccl process groups.
if get_backend(default_pg) in [Backend.GLOO, Backend.NCCL]:
default_pg._set_sequence_number_for_group()
def _new_process_group_helper(
world_size,
rank,
group_ranks,
backend,
store,
pg_options=None,
group_name=None,
timeout=default_pg_timeout,
):
"""
Create a new distributed process group.
This function must be called by ALL processes in the global group, even if
the calling process is not part of the newly created group. In that case,
this function returns GroupMember.NON_GROUP_MEMBER.
This function is called with ``group_ranks == []`` for the default group.
"""
global _pg_map
global _group_count
global _pg_names
if not group_name:
group_name = str(_group_count)
_group_count += 1
if group_name in _pg_names.values():
raise RuntimeError(
"The specified group name has already been "
"created, please use a different group name"
)
if not isinstance(timeout, timedelta):
raise RuntimeError(
"Expected timeout argument to be of type" "datetime.timedelta"
)
# The list of group ranks is empty if we're creating the default group.
is_default_group = len(group_ranks) == 0
backend = Backend(backend)
pg: Union[ProcessGroupGloo, ProcessGroupMPI, ProcessGroupNCCL, ProcessGroupUCC]
if backend == Backend.MPI:
if not is_mpi_available():
raise RuntimeError(
"Distributed package doesn't have MPI built in."
" MPI is only included if you build PyTorch from"
" source on a host that has MPI installed."
)
pg = ProcessGroupMPI.create(group_ranks)
if not pg:
return GroupMember.NON_GROUP_MEMBER
_pg_map[pg] = (Backend.MPI, None)
_pg_names[pg] = group_name
else:
# If this is a subgroup (which means group_ranks is specified),
# we check if the current process is a member of the new group.
if not is_default_group:
global_rank = _get_default_group().rank()
if global_rank not in group_ranks:
return GroupMember.NON_GROUP_MEMBER
# Use the group name as prefix in the default store, such that
# a single store can be reused by multiple groups.
prefix_store = PrefixStore(group_name, store)
if backend == Backend.GLOO:
if pg_options is not None:
raise RuntimeError("GLOO options not supported")
pg = ProcessGroupGloo(prefix_store, rank, world_size, timeout=timeout)
# In debug mode and if GLOO is available, wrap in a wrapper PG that
# enables enhanced collective checking for debugability.
if get_debug_level() == DebugLevel.DETAIL:
if not _GLOO_AVAILABLE:
logger.info(
"""TORCH_DISTRIBUTED_DEBUG was set to DETAIL, but
GLOO is not available. Build with Gloo to
create a wrapper process group in debug mode
to aid collective desynchronization debugging."""
)
else:
pg = _create_process_group_wrapper(
wrapped_pg=pg,
store_prefix=group_name,
store=store,
rank=rank,
world_size=world_size,
timeout=timeout,
)
_pg_map[pg] = (Backend.GLOO, store)
_pg_names[pg] = group_name
elif backend == Backend.NCCL:
if not is_nccl_available():
raise RuntimeError("Distributed package doesn't have NCCL " "built in")
if pg_options is not None:
assert isinstance(
pg_options, ProcessGroupNCCL.Options
), "Expected pg_options argument to be of type ProcessGroupNCCL.Options"
else:
# default pg_options for NCCL
pg_options = ProcessGroupNCCL.Options()
pg_options.is_high_priority_stream = False
pg_options._timeout = timeout
pg = ProcessGroupNCCL(prefix_store, rank, world_size, pg_options)
# In debug mode and if GLOO is available, wrap in a wrapper PG that
# enables enhanced collective checking for debugability.
if get_debug_level() == DebugLevel.DETAIL:
if not _GLOO_AVAILABLE:
logger.info(
"""TORCH_DISTRIBUTED_DEBUG was set to DETAIL, but
GLOO is not available. Build with Gloo to
create a wrapper process group in debug mode
to aid collective desynchronization debugging."""
)
else:
pg = _create_process_group_wrapper(
wrapped_pg=pg,
store_prefix=group_name,
store=store,
rank=rank,
world_size=world_size,
timeout=timeout,
)
_pg_map[pg] = (Backend.NCCL, store)
_pg_names[pg] = group_name
elif backend == Backend.UCC and is_ucc_available():
# TODO: once UCC plugin is fully deprecated, remove
# is_ucc_available() from above elif-condition and raise
# RuntimeError if is_ucc_available() returns false.
pg = ProcessGroupUCC(prefix_store, rank, world_size, timeout=timeout)
# In debug mode and if GLOO is available, wrap in a wrapper PG that
# enables enhanced collective checking for debugability.
if get_debug_level() == DebugLevel.DETAIL:
if not _GLOO_AVAILABLE:
logger.info(
"""TORCH_DISTRIBUTED_DEBUG was set to DETAIL, but
GLOO is not available. Build with Gloo to
create a wrapper process group in debug mode
to aid collective desynchronization debugging."""
)
else:
pg = _create_process_group_wrapper(
wrapped_pg=pg,
store_prefix=group_name,
store=store,
rank=rank,
world_size=world_size,
timeout=timeout,
)
_pg_map[pg] = (Backend.UCC, store)
_pg_names[pg] = group_name
else:
assert backend.upper() in Backend._plugins, (
f"unknown c10d backend type {backend.upper()}"
)
pg = Backend._plugins[backend.upper()](
prefix_store, rank, world_size, timeout
)
_pg_map[pg] = (backend, store)
_pg_names[pg] = group_name
return pg
def destroy_process_group(group=None):
"""
Destroy a given process group, and deinitialize the distributed package.
Args:
group (ProcessGroup, optional): The process group to be destroyed, if
group.WORLD is given, all process
groups including the default one will
be destroyed.
"""
global _pg_map
global _pg_names
global _pg_group_ranks
global _default_pg_init_method
global _group_count
if group == GroupMember.NON_GROUP_MEMBER:
return
if group is None:
pg = GroupMember.WORLD
else:
pg = group
assert pg is not None
if _pg_map.get(pg, None) is None:
raise RuntimeError("Invalid process group specified")
if group is None or group == GroupMember.WORLD:
_update_default_pg(None)
_default_pg_init_method = None
_pg_map.clear()
_pg_names.clear()
_pg_group_ranks.clear()
# when process group doesn't have an explicit name (only WORLD (default)
# process group can have an explicit name), we use the global _group_count
# to generate the name. We need to reset the counter on destruction to
# allow a consistent value to be generated when we re-create process
# groups after some trainers recover from failure
#
# We only reset this when WORLD is being destroyed because if this
# process group is in good state, we aren't dealing with failures.
_group_count = 0
else:
del _pg_map[pg]
del _pg_names[pg]
del _pg_group_ranks[pg]
def get_rank(group=None):
"""
Returns the rank of the current process in the provided ``group`` or the
default group if none was provided.
Rank is a unique identifier assigned to each process within a distributed
process group. Ranks are always consecutive integers ranging from 0 to
``world_size - 1``.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
The rank of the process group
-1, if not part of the group
"""
if _rank_not_in_group(group):
return -1
default_pg = _get_default_group()
if group is None or group is GroupMember.WORLD:
return default_pg.rank()
return _get_group_rank(group, default_pg.rank())
def get_world_size(group=None):
"""
Returns the number of processes in the current process group
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
The world size of the process group
-1, if not part of the group
"""
if _rank_not_in_group(group):
return -1
return _get_group_size(group)
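# Illustrative invariant for the two helpers above (when this process belongs to the group):
#   0 <= get_rank(group) < get_world_size(group)
# Both return -1 when the calling process is not part of ``group``.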
def isend(tensor, dst, group=None, tag=0):
"""
Sends a tensor asynchronously.
.. warning::
Modifying ``tensor`` before the request completes causes undefined
behavior.
Args:
tensor (Tensor): Tensor to send.
dst (int): Destination rank.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
tag (int, optional): Tag to match send with remote recv
Returns:
A distributed request object.
None, if not part of the group
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
_warn_not_in_group("isend")
return
if group is None or group is GroupMember.WORLD:
default_pg = _get_default_group()
return default_pg.send([tensor], dst, tag)
else:
group_dst_rank = _get_group_rank(group, dst)
return group.send([tensor], group_dst_rank, tag)
def irecv(tensor, src=None, group=None, tag=0):
"""
Receives a tensor asynchronously.
Args:
tensor (Tensor): Tensor to fill with received data.
src (int, optional): Source rank. Will receive from any
process if unspecified.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
tag (int, optional): Tag to match recv with remote send
Returns:
A distributed request object.
None, if not part of the group
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
_warn_not_in_group("irecv")
return
if group is None or group is GroupMember.WORLD:
pg = _get_default_group()
else:
pg = group
if src is None:
return pg.recv_anysource([tensor], tag)
else:
if pg is GroupMember.WORLD:
return pg.recv([tensor], src, tag)
else:
group_src_rank = _get_group_rank(pg, src)
return pg.recv([tensor], group_src_rank, tag)
def send(tensor, dst, group=None, tag=0):
"""
Sends a tensor synchronously.
Args:
tensor (Tensor): Tensor to send.
dst (int): Destination rank.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
tag (int, optional): Tag to match send with remote recv
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
_warn_not_in_group("send")
return
if group is None or group is GroupMember.WORLD:
default_pg = _get_default_group()
default_pg.send([tensor], dst, tag).wait()
else:
group_dst_rank = _get_group_rank(group, dst)
group.send([tensor], group_dst_rank, tag).wait()
def recv(tensor, src=None, group=None, tag=0):
"""
Receives a tensor synchronously.
Args:
tensor (Tensor): Tensor to fill with received data.
src (int, optional): Source rank. Will receive from any
process if unspecified.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
tag (int, optional): Tag to match recv with remote send
Returns:
Sender rank
-1, if not part of the group
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
_warn_not_in_group("recv")
return -1
if group is None:
pg = _get_default_group()
else:
pg = group
if src is None:
work = pg.recv_anysource([tensor], tag)
work.wait()
src_rank = work._source_rank()
if group is None or group is GroupMember.WORLD:
return src_rank
else:
return _get_global_rank(pg, src_rank)
else:
if group is None or group is GroupMember.WORLD:
pg.recv([tensor], src, tag).wait()
else:
group_src_rank = _get_group_rank(pg, src)
pg.recv([tensor], group_src_rank, tag).wait()
return src
class P2POp(object):
"""
A class to build point-to-point operations for ``batch_isend_irecv``.
This class builds the type of P2P operation, communication buffer, peer rank,
process group, and tag. Instances of this class will be passed to
``batch_isend_irecv`` for point-to-point communications.
Args:
op (Callable): A function to send data to or receive data from a peer process.
The type of ``op`` is either ``torch.distributed.isend`` or
``torch.distributed.irecv``.
tensor (Tensor): Tensor to send or receive.
peer (int): Destination or source rank.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
tag (int, optional): Tag to match send with recv.
"""
def __init__(self, op, tensor, peer, group=None, tag=0):
self.op = op
self.tensor = tensor
self.peer = peer
self.group = group
self.tag = tag
def __new__(cls, op, tensor, peer, group=None, tag=0):
_check_op(op)
_check_single_tensor(tensor, "tensor")
return object.__new__(cls)
@contextlib.contextmanager
def _coalescing_manager(group, reqs):
if group is None:
group = _get_default_group()
group._start_coalescing()
try:
yield
finally:
group._end_coalescing(reqs)
def batch_isend_irecv(p2p_op_list):
"""
Send or receive a batch of tensors asynchronously and return a list of requests.
Process each of the operations in ``p2p_op_list`` and return the corresponding
requests. The NCCL, Gloo, and UCC backends are currently supported.
Args:
p2p_op_list: A list of point-to-point operations (the type of each operator is
``torch.distributed.P2POp``). The order of the isend/irecv in the list
matters and it needs to match with corresponding isend/irecv on the
remote end.
Returns:
A list of distributed request objects returned by calling the corresponding
op in the op_list.
Examples:
>>> # xdoctest: +SKIP("no rank")
>>> send_tensor = torch.arange(2) + 2 * rank
>>> recv_tensor = torch.randn(2)
>>> send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1)%world_size)
>>> recv_op = dist.P2POp(dist.irecv, recv_tensor, (rank - 1 + world_size)%world_size)
>>> reqs = batch_isend_irecv([send_op, recv_op])
>>> for req in reqs:
>>> req.wait()
>>> recv_tensor
tensor([2, 3]) # Rank 0
tensor([0, 1]) # Rank 1
.. note:: Note that when this API is used with the NCCL PG backend, users must set
the current GPU device with `torch.cuda.set_device`, otherwise it will
lead to unexpected hang issues.
In addition, if this API is the first collective call in the ``group``
passed to ``dist.P2POp``, all ranks of the ``group`` must participate in
this API call; otherwise, the behavior is undefined. If this API call is
not the first collective call in the ``group``, batched P2P operations
involving only a subset of ranks of the ``group`` are allowed.
"""
_check_p2p_op_list(p2p_op_list)
group = p2p_op_list[0].group
reqs = []
with _coalescing_manager(group, reqs):
for p2p_op in p2p_op_list:
op = p2p_op.op
tensor = p2p_op.tensor
peer = p2p_op.peer
curr_group = p2p_op.group
tag = p2p_op.tag
ret = op(tensor, peer, curr_group, tag)
if ret is not None:
reqs.append(ret)
return reqs
def broadcast_multigpu(tensor_list, src, group=None, async_op=False, src_tensor=0):
"""
Broadcasts the tensor to the whole group with multiple GPU tensors
per node.
``tensor`` must have the same number of elements in all the GPUs from
all processes participating in the collective. Each tensor in the list must
be on a different GPU.
Only the NCCL and Gloo backends are currently supported;
tensors should only be GPU tensors.
Args:
tensor_list (List[Tensor]): Tensors that participate in the collective
operation. If ``src`` is the rank, then the specified ``src_tensor``
element of ``tensor_list`` (``tensor_list[src_tensor]``) will be
broadcast to all other tensors (on different GPUs) in the src process
and all tensors in ``tensor_list`` of other non-src processes.
You also need to make sure that ``len(tensor_list)`` is the same
for all the distributed processes calling this function.
src (int): Source rank.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
src_tensor (int, optional): Source tensor rank within ``tensor_list``
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
if _rank_not_in_group(group):
_warn_not_in_group("broadcast_multigpu")
return
opts = BroadcastOptions()
opts.rootRank = src
opts.rootTensor = src_tensor
if group is None or group is GroupMember.WORLD:
default_pg = _get_default_group()
work = default_pg.broadcast(tensor_list, opts)
else:
group_src_rank = _get_group_rank(group, src)
opts.rootRank = group_src_rank
work = group.broadcast(tensor_list, opts)
if async_op:
return work
else:
work.wait()
def broadcast(tensor, src, group=None, async_op=False):
"""
Broadcasts the tensor to the whole group.
``tensor`` must have the same number of elements in all processes
participating in the collective.
Args:
tensor (Tensor): Data to be sent if ``src`` is the rank of current
process, and tensor to be used to save received data otherwise.
src (int): Source rank.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
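Example (illustrative; assumes 2 ranks and an initialized default group):
>>> # xdoctest: +SKIP("no rank")
>>> if dist.get_rank() == 0:
>>>     tensor = torch.arange(2, dtype=torch.int64)
>>> else:
>>>     tensor = torch.zeros(2, dtype=torch.int64)
>>> dist.broadcast(tensor, src=0)
>>> tensor
tensor([0, 1]) # on every rank after the broadcast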
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
_warn_not_in_group("broadcast")
return
opts = BroadcastOptions()
opts.rootRank = src
opts.rootTensor = 0
if group is None or group is GroupMember.WORLD:
default_pg = _get_default_group()
work = default_pg.broadcast([tensor], opts)
else:
group_src_rank = _get_group_rank(group, src)
opts.rootRank = group_src_rank
work = group.broadcast([tensor], opts)
if async_op:
return work
else:
work.wait()
def all_reduce_multigpu(tensor_list, op=ReduceOp.SUM, group=None, async_op=False):
r"""
Reduces the tensor data across all machines in such a way that all get
the final result. This function reduces a number of tensors on every node,
while each tensor resides on a different GPU.
Therefore, the input tensors in the tensor list need to be GPU tensors.
Also, each tensor in the tensor list needs to reside on a different GPU.
After the call, every ``tensor`` in ``tensor_list`` will be bitwise
identical in all processes.
Complex tensors are supported.
Only the ``nccl`` and ``gloo`` backends are currently supported;
all tensors must be GPU tensors.
Args:
tensor_list (List[Tensor]): List of input and output tensors of
the collective. The function operates in-place and requires that
each tensor to be a GPU tensor on different GPUs.
You also need to make sure that ``len(tensor_list)`` is the same for
all the distributed processes calling this function.
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on. If
``None``, the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
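Example (illustrative sketch; assumes a 2-rank default process group where
each rank runs on its own node with two GPUs, so four tensors are summed)::
>>> # xdoctest: +SKIP("need process group init")
>>> rank = dist.get_rank()
>>> tensor_list = [torch.ones(2, device=f"cuda:{i}") * (rank + 1) for i in range(2)]
>>> dist.all_reduce_multigpu(tensor_list, op=ReduceOp.SUM)
>>> tensor_list
[tensor([6., 6.], device='cuda:0'), tensor([6., 6.], device='cuda:1')] # Every rank: 1 + 1 + 2 + 2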
"""
if _rank_not_in_group(group):
return
tensor_list = [
t if not t.is_complex() else torch.view_as_real(t) for t in tensor_list
]
opts = AllreduceOptions()
opts.reduceOp = op
if group is None:
default_pg = _get_default_group()
work = default_pg.allreduce(tensor_list, opts)
else:
work = group.allreduce(tensor_list, opts)
if async_op:
return work
else:
work.wait()
def all_reduce(tensor, op=ReduceOp.SUM, group=None, async_op=False):
"""
Reduces the tensor data across all machines in such a way that all get
the final result.
After the call ``tensor`` is going to be bitwise identical in all processes.
Complex tensors are supported.
Args:
tensor (Tensor): Input and output of the collective. The function
operates in-place.
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
Examples:
>>> # xdoctest: +SKIP("no rank")
>>> # All tensors below are of torch.int64 type.
>>> # We have 2 process groups, 2 ranks.
>>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
>>> tensor
tensor([1, 2]) # Rank 0
tensor([3, 4]) # Rank 1
>>> dist.all_reduce(tensor, op=ReduceOp.SUM)
>>> tensor
tensor([4, 6]) # Rank 0
tensor([4, 6]) # Rank 1
>>> # All tensors below are of torch.cfloat type.
>>> # We have 2 process groups, 2 ranks.
>>> tensor = torch.tensor([1+1j, 2+2j], dtype=torch.cfloat) + 2 * rank * (1+1j)
>>> tensor
tensor([1.+1.j, 2.+2.j]) # Rank 0
tensor([3.+3.j, 4.+4.j]) # Rank 1
>>> dist.all_reduce(tensor, op=ReduceOp.SUM)
>>> tensor
tensor([4.+4.j, 6.+6.j]) # Rank 0
tensor([4.+4.j, 6.+6.j]) # Rank 1
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
_warn_not_in_group("all_reduce")
return
if tensor.is_complex():
if not supports_complex(op):
raise RuntimeError(f"all_reduce does not support {op} on complex tensors")
tensor = torch.view_as_real(tensor)
opts = AllreduceOptions()
opts.reduceOp = op
if group is None:
default_pg = _get_default_group()
work = default_pg.allreduce([tensor], opts)
else:
work = group.allreduce([tensor], opts)
if async_op:
return work
else:
work.wait()
def all_reduce_coalesced(tensors, op=ReduceOp.SUM, group=None, async_op=False):
"""
WARNING: at this time individual shape checking is not implemented across nodes.
For example, if the rank 0 node passes [torch.rand(4), torch.rand(2)] and the
rank 1 node passes [torch.rand(2), torch.rand(2), torch.rand(2)], the allreduce
operation will proceed without complaint and return erroneous outputs. This lack
of shape checking results in significant performance improvements but users of this
function should take extra care to ensure that each node passes in tensors whose
shapes match across nodes.
Reduces each tensor in tensors (residing on the same device) across all machines
in such a way that all get the final result.
After the call, each tensor in ``tensors`` will be bitwise identical
in all processes.
Complex tensors are supported.
Args:
tensors (List[Tensor]): Input and output of the collective. The function
operates in-place.
op (Optional[ReduceOp]): One of the values from
``torch.distributed.ReduceOp`` enum. Specifies an operation used for
element-wise reductions.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (Optional[bool]): Whether this op should be an async op.
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group.
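Example (illustrative sketch; assumes a 2-rank default process group is
already initialized and ``rank`` denotes the calling process's rank)::
>>> # xdoctest: +SKIP("need process group init")
>>> tensors = [torch.ones(2, dtype=torch.int64) * (rank + 1),
...            torch.ones(3, dtype=torch.int64) * (rank + 1)]
>>> dist.all_reduce_coalesced(tensors, op=ReduceOp.SUM)
>>> tensors
[tensor([3, 3]), tensor([3, 3, 3])] # Same result on every rank: 1 + 2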
"""
_check_tensor_list(tensors, "tensor")
if _rank_not_in_group(group):
_warn_not_in_group("all_reduce_coalesced")
return
if any([t.is_complex() for t in tensors]) and not supports_complex(op):
raise RuntimeError(f"all_reduce does not support {op} on complex tensors")
tensors = [t if not t.is_complex() else torch.view_as_real(t) for t in tensors]
opts = AllreduceCoalescedOptions()
opts.reduceOp = op
if group is None:
default_pg = _get_default_group()
work = default_pg.allreduce_coalesced(tensors, opts)
else:
work = group.allreduce_coalesced(tensors, opts)
if async_op:
return work.get_future()
else:
work.wait()
def reduce_multigpu(
tensor_list, dst, op=ReduceOp.SUM, group=None, async_op=False, dst_tensor=0
):
"""
Reduces the tensor data on multiple GPUs across all machines. Each tensor
in ``tensor_list`` should reside on a separate GPU
Only the GPU of ``tensor_list[dst_tensor]`` on the process with rank ``dst``
is going to receive the final result.
Only the ``nccl`` backend is currently supported;
all tensors must be GPU tensors.
Args:
tensor_list (List[Tensor]): Input and output GPU tensors of the
collective. The function operates in-place.
You also need to make sure that ``len(tensor_list)`` is the same for
all the distributed processes calling this function.
dst (int): Destination rank
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
dst_tensor (int, optional): Destination tensor rank within
``tensor_list``
Returns:
Async work handle, if async_op is set to True.
None, otherwise
"""
if _rank_not_in_group(group):
_warn_not_in_group("reduce_multigpu")
return
opts = ReduceOptions()
opts.reduceOp = op
opts.rootRank = dst
opts.rootTensor = dst_tensor
if group is None or group is GroupMember.WORLD:
default_pg = _get_default_group()
work = default_pg.reduce(tensor_list, opts)
else:
group_dst_rank = _get_group_rank(group, dst)
opts.rootRank = group_dst_rank
work = group.reduce(tensor_list, opts)
if async_op:
return work
else:
work.wait()
def reduce(tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
"""
Reduces the tensor data across all machines.
Only the process with rank ``dst`` is going to receive the final result.
Args:
tensor (Tensor): Input and output of the collective. The function
operates in-place.
dst (int): Destination rank
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
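Example (illustrative sketch; assumes a 2-rank default process group is
already initialized and ``rank`` denotes the calling process's rank)::
>>> # xdoctest: +SKIP("need process group init")
>>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
>>> tensor
tensor([1, 2]) # Rank 0
tensor([3, 4]) # Rank 1
>>> dist.reduce(tensor, dst=0, op=ReduceOp.SUM)
>>> tensor
tensor([4, 6]) # Rank 0, holds the summed result
tensor([3, 4]) # Rank 1, contents are unspecified on non-dst ranks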
"""
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
_warn_not_in_group("reduce")
return
opts = ReduceOptions()
opts.reduceOp = op
opts.rootRank = dst
if group is None or group is GroupMember.WORLD:
default_pg = _get_default_group()
work = default_pg.reduce([tensor], opts)
else:
group_dst_rank = _get_group_rank(group, dst)
opts.rootRank = group_dst_rank
work = group.reduce([tensor], opts)
if async_op:
return work
else:
work.wait()
def all_gather_multigpu(
output_tensor_lists, input_tensor_list, group=None, async_op=False
):
"""
Gathers tensors from the whole group in a list.
Each tensor in ``input_tensor_list`` should reside on a separate GPU.
Only the ``nccl`` backend is currently supported;
all tensors must be GPU tensors.
Complex tensors are supported.
Args:
output_tensor_lists (List[List[Tensor]]): Output lists. It should
contain correctly-sized tensors on each GPU to be used for output
of the collective, e.g. ``output_tensor_lists[i]`` contains the
all_gather result that resides on the GPU of
``input_tensor_list[i]``.
Note that each element of ``output_tensor_lists`` has the size of
``world_size * len(input_tensor_list)``, since the function all
gathers the result from every single GPU in the group. To interpret
each element of ``output_tensor_lists[i]``, note that
``input_tensor_list[j]`` of rank k will appear in
``output_tensor_lists[i][k * world_size + j]``
Also note that ``len(output_tensor_lists)``, and the size of each
element in ``output_tensor_lists`` (each element is a list,
therefore ``len(output_tensor_lists[i])``) need to be the same
for all the distributed processes calling this function.
input_tensor_list (List[Tensor]): List of tensors(on different GPUs) to
be broadcast from current process.
Note that ``len(input_tensor_list)`` needs to be the same for
all the distributed processes calling this function.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
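Example (illustrative sketch; assumes a 2-rank default process group on a
single node where rank ``i`` uses GPU ``i``)::
>>> # xdoctest: +SKIP("need process group init")
>>> rank = dist.get_rank()
>>> input_tensor_list = [torch.tensor([rank], device=f"cuda:{rank}")]
>>> output_tensor_lists = [[torch.zeros(1, dtype=torch.int64, device=f"cuda:{rank}") for _ in range(2)]]
>>> dist.all_gather_multigpu(output_tensor_lists, input_tensor_list)
>>> output_tensor_lists
[[tensor([0], device='cuda:0'), tensor([1], device='cuda:0')]] # Rank 0
[[tensor([0], device='cuda:1'), tensor([1], device='cuda:1')]] # Rank 1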
"""
if _rank_not_in_group(group):
_warn_not_in_group("all_gather_multigpu")
return
output_tensor_lists = [
[t if not t.is_complex() else torch.view_as_real(t) for t in l]
for l in output_tensor_lists
]
input_tensor_list = [
t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list
]
if group is None:
default_pg = _get_default_group()
work = default_pg.allgather(output_tensor_lists, input_tensor_list)
else:
work = group.allgather(output_tensor_lists, input_tensor_list)
if async_op:
return work
else:
work.wait()
def _object_to_tensor(obj, device):
f = io.BytesIO()
_pickler(f).dump(obj)
byte_storage = torch.ByteStorage.from_buffer(f.getvalue()) # type: ignore[attr-defined]
# Do not replace `torch.ByteTensor` or `torch.LongTensor` with torch.tensor and specifying dtype.
# Otherwise, it will cause 100X slowdown.
# See: https://github.com/pytorch/pytorch/issues/65696
byte_tensor = torch.ByteTensor(byte_storage).to(device)
local_size = torch.LongTensor([byte_tensor.numel()]).to(device)
return byte_tensor, local_size
def _tensor_to_object(tensor, tensor_size):
tensor = tensor.cpu()
buf = tensor.numpy().tobytes()[:tensor_size]
return _unpickler(io.BytesIO(buf)).load()
def _check_for_nccl_backend(group):
pg = group or _get_default_group()
# Gate PG wrapper check on Gloo availability.
if _GLOO_AVAILABLE:
# It is not expected for PG to be wrapped many times, but support it just
# in case
while isinstance(pg, _ProcessGroupWrapper):
pg = pg.wrapped_pg
return (
is_nccl_available() and
isinstance(pg, ProcessGroupNCCL)
)
def all_gather_object(object_list, obj, group=None):
"""
Gathers picklable objects from the whole group into a list. Similar to
:func:`all_gather`, but Python objects can be passed in. Note that the object
must be picklable in order to be gathered.
Args:
object_list (list[Any]): Output list. It should be correctly sized as the
size of the group for this collective and will contain the output.
obj (Any): Picklable Python object to be broadcast from current process.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used. Default is ``None``.
Returns:
None. If the calling rank is part of this group, the output of the
collective will be populated into the input ``object_list``. If the
calling rank is not part of the group, the passed in ``object_list`` will
be unmodified.
.. note:: Note that this API differs slightly from the :func:`all_gather`
collective since it does not provide an ``async_op`` handle and thus
will be a blocking call.
.. note:: For NCCL-based process groups, internal tensor representations
of objects must be moved to the GPU device before communication takes
place. In this case, the device used is given by
``torch.cuda.current_device()`` and it is the user's responsibility to
ensure that this is set so that each rank has an individual GPU, via
``torch.cuda.set_device()``.
.. warning::
:func:`all_gather_object` uses ``pickle`` module implicitly, which is
known to be insecure. It is possible to construct malicious pickle data
which will execute arbitrary code during unpickling. Only call this
function with data you trust.
Example::
>>> # xdoctest: +SKIP("need process group init")
>>> # Note: Process group initialization omitted on each rank.
>>> import torch.distributed as dist
>>> # Assumes world_size of 3.
>>> gather_objects = ["foo", 12, {1: 2}] # any picklable object
>>> output = [None for _ in gather_objects]
>>> dist.all_gather_object(output, gather_objects[dist.get_rank()])
>>> output
['foo', 12, {1: 2}]
"""
if _rank_not_in_group(group):
_warn_not_in_group("all_gather_object")
return
current_device = _get_pg_device(group)
input_tensor, local_size = _object_to_tensor(obj, current_device)
# Gather all local sizes. This is so that we can find the max size, and index
# until the correct size when deserializing the tensors.
group_size = get_world_size(group=group)
object_sizes_tensor = torch.zeros(
group_size, dtype=torch.long, device=current_device
)
object_size_list = [
object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size)
]
# Allgather tensor sizes
all_gather(object_size_list, local_size, group=group)
max_object_size = int(max(object_size_list).item()) # type: ignore[type-var]
# Resize tensor to max size across all ranks.
input_tensor.resize_(max_object_size)
coalesced_output_tensor = torch.empty(
max_object_size * group_size, dtype=torch.uint8, device=current_device
)
# Output tensors are nonoverlapping views of coalesced_output_tensor
output_tensors = [
coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)]
for i in range(group_size)
]
all_gather(output_tensors, input_tensor, group=group)
# Deserialize outputs back to object.
for i, tensor in enumerate(output_tensors):
tensor = tensor.type(torch.uint8)
if tensor.device != torch.device("cpu"):
tensor = tensor.cpu()
tensor_size = object_size_list[i]
object_list[i] = _tensor_to_object(tensor, tensor_size)
def gather_object(obj, object_gather_list=None, dst=0, group=None):
"""
Gathers picklable objects from the whole group in a single process.
Similar to :func:`gather`, but Python objects can be passed in. Note that the
object must be picklable in order to be gathered.
Args:
obj (Any): Input object. Must be picklable.
object_gather_list (list[Any]): Output list. On the ``dst`` rank, it
should be correctly sized as the size of the group for this
collective and will contain the output. Must be ``None`` on non-dst
ranks. (default is ``None``)
dst (int, optional): Destination rank. (default is 0)
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used. Default is ``None``.
Returns:
None. On the ``dst`` rank, ``object_gather_list`` will contain the
output of the collective.
.. note:: Note that this API differs slightly from the gather collective
since it does not provide an async_op handle and thus will be a blocking
call.
.. note:: For NCCL-based process groups, internal tensor representations
of objects must be moved to the GPU device before communication takes
place. In this case, the device used is given by
``torch.cuda.current_device()`` and it is the user's responsibility to
ensure that this is set so that each rank has an individual GPU, via
``torch.cuda.set_device()``.
.. warning::
:func:`gather_object` uses ``pickle`` module implicitly, which is
known to be insecure. It is possible to construct malicious pickle data
which will execute arbitrary code during unpickling. Only call this
function with data you trust.
Example::
>>> # xdoctest: +SKIP("need process group init")
>>> # Note: Process group initialization omitted on each rank.
>>> import torch.distributed as dist
>>> # Assumes world_size of 3.
>>> gather_objects = ["foo", 12, {1: 2}] # any picklable object
>>> output = [None for _ in gather_objects]
>>> dist.gather_object(
... gather_objects[dist.get_rank()],
... output if dist.get_rank() == 0 else None,
... dst=0
... )
>>> # On rank 0
>>> output
['foo', 12, {1: 2}]
"""
if _rank_not_in_group(group):
_warn_not_in_group("gather_object")
return
# Ensure object_gather_list is specified appropriately.
my_rank = get_rank()
_validate_output_list_for_rank(my_rank, dst, object_gather_list)
current_device = _get_pg_device(group)
input_tensor, local_size = _object_to_tensor(obj, current_device)
# Gather all local sizes. This is so that we can find the max size, and index
# until the correct size when deserializing the tensors.
group_size = get_world_size(group=group)
object_sizes_tensor = torch.zeros(
group_size, dtype=torch.long, device=current_device
)
object_size_list = [
object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size)
]
# Allgather tensor sizes. An all-gather is needed here despite this being a
# gather, since each rank needs to broadcast a tensor of the same (maximal)
# size.
all_gather(object_size_list, local_size, group=group)
max_object_size = int(max(object_size_list).item()) # type: ignore[type-var]
# Resize tensor to max size across all ranks.
input_tensor.resize_(max_object_size)
# Avoid populating output tensors if the result won't be gathered on this rank.
if my_rank == dst:
coalesced_output_tensor = torch.empty(
max_object_size * group_size, dtype=torch.uint8, device=current_device
)
# Output tensors are nonoverlapping views of coalesced_output_tensor
output_tensors = [
coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)]
for i in range(group_size)
]
# All ranks call gather with equal-sized tensors.
gather(
input_tensor,
gather_list=output_tensors if my_rank == dst else None,
dst=dst,
group=group,
)
if my_rank != dst:
return
for i, tensor in enumerate(output_tensors):
tensor = tensor.type(torch.uint8)
tensor_size = object_size_list[i]
object_gather_list[i] = _tensor_to_object(tensor, tensor_size)
def broadcast_object_list(object_list, src=0, group=None, device=None):
"""
Broadcasts picklable objects in ``object_list`` to the whole group. Similar
to :func:`broadcast`, but Python objects can be passed in.
Note that all objects in ``object_list`` must be picklable in order to be
broadcasted.
Args:
object_list (List[Any]): List of input objects to broadcast.
Each object must be picklable. Only objects on the ``src`` rank will
be broadcast, but each rank must provide lists of equal sizes.
src (int): Source rank from which to broadcast ``object_list``.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used. Default is ``None``.
device (``torch.device``, optional): If not None, the objects are
serialized and converted to tensors which are moved to the
``device`` before broadcasting. Default is ``None``.
Returns:
``None``. If rank is part of the group, ``object_list`` will contain the
broadcasted objects from ``src`` rank.
.. note:: For NCCL-based process groups, internal tensor representations
of objects must be moved to the GPU device before communication takes
place. In this case, the device used is given by
``torch.cuda.current_device()`` and it is the user's responsibility to
ensure that this is set so that each rank has an individual GPU, via
``torch.cuda.set_device()``.
.. note:: Note that this API differs slightly from the :func:`all_gather`
collective since it does not provide an ``async_op`` handle and thus
will be a blocking call.
.. warning::
:func:`broadcast_object_list` uses ``pickle`` module implicitly, which
is known to be insecure. It is possible to construct malicious pickle
data which will execute arbitrary code during unpickling. Only call this
function with data you trust.
Example::
>>> # xdoctest: +SKIP("need process group init")
>>> # Note: Process group initialization omitted on each rank.
>>> import torch.distributed as dist
>>> if dist.get_rank() == 0:
>>> # Assumes world_size of 3.
>>> objects = ["foo", 12, {1: 2}] # any picklable object
>>> else:
>>> objects = [None, None, None]
>>> # Assumes backend is not NCCL
>>> device = torch.device("cpu")
>>> dist.broadcast_object_list(objects, src=0, device=device)
>>> objects
['foo', 12, {1: 2}]
"""
if _rank_not_in_group(group):
_warn_not_in_group("broadcast_object_list")
return
# Current device selection.
# To preserve backwards compatibility, ``device`` defaults to ``None``,
# in which case we run the current device-selection logic, i.e.
# ``current_device`` is a CUDA device if the backend is NCCL, otherwise the
# CPU device. If ``device`` is not ``None``, the size and object tensors to
# be broadcast are moved to that device.
current_device = device or _get_pg_device(group)
my_rank = get_rank()
# Serialize object_list elements to tensors on src rank.
if my_rank == src:
tensor_list, size_list = zip(*[_object_to_tensor(obj, current_device) for obj in object_list])
object_sizes_tensor = torch.cat(size_list)
else:
object_sizes_tensor = torch.empty(len(object_list), dtype=torch.long, device=current_device)
# Broadcast object sizes
broadcast(object_sizes_tensor, src=src, group=group)
# Concatenate and broadcast serialized object tensors
if my_rank == src:
object_tensor = torch.cat(tensor_list)
else:
object_tensor = torch.empty( # type: ignore[call-overload]
torch.sum(object_sizes_tensor).item(), # type: ignore[arg-type]
dtype=torch.uint8,
device=current_device
)
broadcast(object_tensor, src=src, group=group)
# Deserialize objects using their stored sizes.
offset = 0
if my_rank != src:
for i, obj_size in enumerate(object_sizes_tensor):
obj_view = object_tensor[offset : offset + obj_size]
obj_view = obj_view.type(torch.uint8)
if obj_view.device != torch.device("cpu"):
obj_view = obj_view.cpu()
offset += obj_size
object_list[i] = _tensor_to_object(obj_view, obj_size)
def scatter_object_list(
scatter_object_output_list, scatter_object_input_list, src=0, group=None
):
"""
Scatters picklable objects in ``scatter_object_input_list`` to the whole
group. Similar to :func:`scatter`, but Python objects can be passed in. On
each rank, the scattered object will be stored as the first element of
``scatter_object_output_list``. Note that all objects in
``scatter_object_input_list`` must be picklable in order to be scattered.
Args:
scatter_object_output_list (List[Any]): Non-empty list whose first
element will store the object scattered to this rank.
scatter_object_input_list (List[Any]): List of input objects to scatter.
Each object must be picklable. Only objects on the ``src`` rank will
be scattered, and the argument can be ``None`` for non-src ranks.
src (int): Source rank from which to scatter
``scatter_object_input_list``.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used. Default is ``None``.
Returns:
``None``. If rank is part of the group, ``scatter_object_output_list``
will have its first element set to the scattered object for this rank.
.. note:: Note that this API differs slightly from the scatter collective
since it does not provide an ``async_op`` handle and thus will be a
blocking call.
.. note:: Note that this API does not support the NCCL backend, as the
tensor-based scatter collective is not supported by ProcessGroupNCCL.
.. warning::
:func:`scatter_object_list` uses ``pickle`` module implicitly, which
is known to be insecure. It is possible to construct malicious pickle
data which will execute arbitrary code during unpickling. Only call this
function with data you trust.
Example::
>>> # xdoctest: +SKIP("need process group init")
>>> # Note: Process group initialization omitted on each rank.
>>> import torch.distributed as dist
>>> if dist.get_rank() == 0:
>>> # Assumes world_size of 3.
>>> objects = ["foo", 12, {1: 2}] # any picklable object
>>> else:
>>> # Can be any list on non-src ranks, elements are not used.
>>> objects = [None, None, None]
>>> output_list = [None]
>>> dist.scatter_object_list(output_list, objects, src=0)
>>> # Rank i gets objects[i]. For example, on rank 2:
>>> output_list
[{1: 2}]
"""
if _rank_not_in_group(group):
_warn_not_in_group("scatter_object_list")
return
if (
not isinstance(scatter_object_output_list, list)
or len(scatter_object_output_list) < 1
):
raise RuntimeError(
"Expected argument scatter_object_output_list to be a list of size at least 1."
)
my_rank = get_rank(group)
pg_device = _get_pg_device(group)
if my_rank == src:
tensor_list, tensor_sizes = zip(
*[_object_to_tensor(obj, pg_device) for obj in scatter_object_input_list]
)
tensor_list, tensor_sizes = list(tensor_list), list(tensor_sizes)
# Src rank broadcasts the maximum tensor size. This is because all ranks are
# expected to call into scatter() with equal-sized tensors.
if my_rank == src:
max_tensor_size = max(tensor_sizes)
for tensor in tensor_list:
tensor.resize_(max_tensor_size)
else:
max_tensor_size = torch.tensor([0], dtype=torch.long, device=pg_device)
broadcast(max_tensor_size, src=src, group=group)
# Scatter actual serialized objects
output_tensor = torch.empty(max_tensor_size.item(), dtype=torch.uint8, device=pg_device)
scatter(
output_tensor,
scatter_list=None if my_rank != src else tensor_list,
src=src,
group=group,
)
# Scatter per-object sizes to trim tensors when deserializing back to object
obj_tensor_size = torch.tensor([0], dtype=torch.long, device=pg_device)
scatter(
obj_tensor_size,
scatter_list=None if my_rank != src else tensor_sizes,
src=src,
group=group,
)
# Deserialize back to object
scatter_object_output_list[0] = _tensor_to_object(output_tensor, obj_tensor_size)
def all_gather(tensor_list, tensor, group=None, async_op=False):
"""
Gathers tensors from the whole group in a list.
Complex tensors are supported.
Args:
tensor_list (list[Tensor]): Output list. It should contain
correctly-sized tensors to be used for output of the collective.
tensor (Tensor): Tensor to be broadcast from current process.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
Examples:
>>> # xdoctest: +SKIP("need process group init")
>>> # All tensors below are of torch.int64 dtype.
>>> # We have 2 process groups, 2 ranks.
>>> tensor_list = [torch.zeros(2, dtype=torch.int64) for _ in range(2)]
>>> tensor_list
[tensor([0, 0]), tensor([0, 0])] # Rank 0 and 1
>>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
>>> tensor
tensor([1, 2]) # Rank 0
tensor([3, 4]) # Rank 1
>>> dist.all_gather(tensor_list, tensor)
>>> tensor_list
[tensor([1, 2]), tensor([3, 4])] # Rank 0
[tensor([1, 2]), tensor([3, 4])] # Rank 1
>>> # All tensors below are of torch.cfloat dtype.
>>> # We have 2 process groups, 2 ranks.
>>> tensor_list = [torch.zeros(2, dtype=torch.cfloat) for _ in range(2)]
>>> tensor_list
[tensor([0.+0.j, 0.+0.j]), tensor([0.+0.j, 0.+0.j])] # Rank 0 and 1
>>> tensor = torch.tensor([1+1j, 2+2j], dtype=torch.cfloat) + 2 * rank * (1+1j)
>>> tensor
tensor([1.+1.j, 2.+2.j]) # Rank 0
tensor([3.+3.j, 4.+4.j]) # Rank 1
>>> dist.all_gather(tensor_list, tensor)
>>> tensor_list
[tensor([1.+1.j, 2.+2.j]), tensor([3.+3.j, 4.+4.j])] # Rank 0
[tensor([1.+1.j, 2.+2.j]), tensor([3.+3.j, 4.+4.j])] # Rank 1
"""
_check_tensor_list(tensor_list, "tensor_list")
_check_single_tensor(tensor, "tensor")
if _rank_not_in_group(group):
_warn_not_in_group("all_gather")
return
tensor_list = [
t if not t.is_complex() else torch.view_as_real(t) for t in tensor_list
]
tensor = tensor if not tensor.is_complex() else torch.view_as_real(tensor)
if group is None:
default_pg = _get_default_group()
work = default_pg.allgather([tensor_list], [tensor])
else:
work = group.allgather([tensor_list], [tensor])
if async_op:
return work
else:
work.wait()
def _all_gather_base(output_tensor, input_tensor, group=None, async_op=False):
"""
Single tensor all gather. Gathers a single tensor from all ranks, and puts them in a single output tensor.
Args:
output_tensor (Tensor): Output tensor. It should contain
correctly-sized tensors to be used for output of the collective.
input_tensor (Tensor): Tensor to be broadcast from current process.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
Examples:
>>> # xdoctest: +SKIP("need process group init")
>>> # All tensors below are of torch.int64 dtype.
>>> # We have 2 process groups, 2 ranks.
>>> output_tensor = torch.zeros(2, dtype=torch.int64)
>>> output_tensor
tensor([0, 0]) # Rank 0 and 1
>>> tensor = torch.arange(1, dtype=torch.int64) + 1 + rank
>>> tensor
tensor([1]) # Rank 0
tensor([2]) # Rank 1
>>> dist._all_gather_base(output_tensor, tensor)
>>> output_tensor
tensor([1,2]) # Rank 0
tensor([1,2]) # Rank 1
.. warning::
`_all_gather_base` is experimental and subject to change.
It is the caller's responsibility to ensure the output_tensor
is correctly sized.
"""
_check_single_tensor(input_tensor, "input_tensor")
_check_single_tensor(output_tensor, "output_tensor")
if _rank_not_in_group(group):
_warn_not_in_group("_all_gather_base")
return
output_tensor = (
output_tensor
if not output_tensor.is_complex()
else torch.view_as_real(output_tensor)
)
input_tensor = (
input_tensor
if not input_tensor.is_complex()
else torch.view_as_real(input_tensor)
)
if group is None:
default_pg = _get_default_group()
work = default_pg._allgather_base(output_tensor, input_tensor)
else:
work = group._allgather_base(output_tensor, input_tensor)
if async_op:
return work
else:
work.wait()
def all_gather_coalesced(
output_tensor_lists, input_tensor_list, group=None, async_op=False
):
"""
Gathers input tensors from the whole group in a list in a coalesced manner.
Complex tensors are supported.
Args:
output_tensor_lists (list[list[Tensor]]): Output list. It should contain
correctly-sized tensors to be used for output of the collective.
input_tensor_list (list[Tensor]): Tensors to be broadcast from
current process. At least one tensor has to be non-empty.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op.
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
Example:
we have 2 process groups, 2 ranks.
rank 0 passes:
input_tensor_list = [[[1, 1], [1, 1]], [2], [3, 3]]
output_tensor_lists =
[[[[-1, -1], [-1, -1]], [-1], [-1, -1]],
[[[-1, -1], [-1, -1]], [-1], [-1, -1]]]
rank 1 passes:
input_tensor_list = [[[3, 3], [3, 3]], [5], [1, 1]]
output_tensor_lists =
[[[[-1, -1], [-1, -1]], [-1], [-1, -1]],
[[[-1, -1], [-1, -1]], [-1], [-1, -1]]]
both rank 0 and 1 get:
output_tensor_lists =
[[[[1, 1], [1, 1]], [2], [3, 3]],
[[[3, 3], [3, 3]], [5], [1, 1]]].
WARNING: at this time individual shape checking is not implemented across nodes.
For example, if the rank 0 node passes [torch.rand(4), torch.rand(2)] and the
rank 1 node passes [torch.rand(2), torch.rand(2), torch.rand(2)], the
all_gather_coalesced operation will proceed without complaint and return
erroneous outputs. This lack of shape checking results in significant
performance improvements but users of this function should take extra care
to ensure that each node passes in tensors whose shapes match across nodes.
"""
# We only check basic compatibility with C++ params here, C++ code will
# do shape and type checking.
if _rank_not_in_group(group):
_warn_not_in_group("all_gather_coalesced")
return
_check_tensor_list(input_tensor_list, "tensor_list")
if not isinstance(output_tensor_lists, list):
raise RuntimeError(
"Invalid function argument: " "output_tensor_lists should be a list"
)
for output_tensor_list in output_tensor_lists:
_check_tensor_list(output_tensor_list, "output_tensor_lists")
output_tensor_lists = [
[t if not t.is_complex() else torch.view_as_real(t) for t in l]
for l in output_tensor_lists
]
input_tensor_list = [
t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list
]
if group is None:
default_pg = _get_default_group()
work = default_pg.allgather_coalesced(output_tensor_lists, input_tensor_list)
else:
work = group.allgather_coalesced(output_tensor_lists, input_tensor_list)
if async_op:
return work.get_future()
else:
work.wait()
def _validate_output_list_for_rank(my_rank, dst, gather_list):
if dst == my_rank:
if not gather_list:
raise ValueError(
"Argument ``gather_list`` must be specified on destination rank."
)
elif gather_list:
raise ValueError(
"Argument ``gather_list`` must NOT be specified "
"on non-destination ranks."
)
def gather(tensor, gather_list=None, dst=0, group=None, async_op=False):
"""
Gathers a list of tensors in a single process.
Args:
tensor (Tensor): Input tensor.
gather_list (list[Tensor], optional): List of appropriately-sized
tensors to use for gathered data (default is None, must be specified
on the destination rank)
dst (int, optional): Destination rank (default is 0)
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
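Example (illustrative sketch; assumes a 2-rank default process group is
already initialized and ``rank`` denotes the calling process's rank)::
>>> # xdoctest: +SKIP("need process group init")
>>> tensor = torch.tensor([rank], dtype=torch.int64)
>>> gather_list = [torch.zeros(1, dtype=torch.int64) for _ in range(2)] if rank == 0 else None
>>> dist.gather(tensor, gather_list=gather_list, dst=0)
>>> gather_list
[tensor([0]), tensor([1])] # Rank 0
None # Rank 1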
"""
_check_single_tensor(tensor, "tensor")
# Parameter ``gather_list`` may be left unspecified on non-dst ranks.
if gather_list:
_check_tensor_list(gather_list, "gather_list")
else:
gather_list = []
if _rank_not_in_group(group):
_warn_not_in_group("gather")
return
my_rank = get_rank()
_validate_output_list_for_rank(my_rank, dst, gather_list)
output_tensors = [gather_list] if dst == my_rank else []
input_tensors = [tensor]
opts = GatherOptions()
opts.rootRank = dst
if group is None or group is GroupMember.WORLD:
default_pg = _get_default_group()
work = default_pg.gather(output_tensors, input_tensors, opts)
else:
group_dst_rank = _get_group_rank(group, dst)
opts.rootRank = group_dst_rank
work = group.gather(output_tensors, input_tensors, opts)
if async_op:
return work
else:
work.wait()
def scatter(tensor, scatter_list=None, src=0, group=None, async_op=False):
"""
Scatters a list of tensors to all processes in a group.
Each process will receive exactly one tensor and store its data in the
``tensor`` argument.
Complex tensors are supported.
Args:
tensor (Tensor): Output tensor.
scatter_list (list[Tensor]): List of tensors to scatter (default is
None, must be specified on the source rank)
src (int): Source rank (default is 0)
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
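Example (illustrative sketch; assumes a 2-rank default process group is
already initialized and ``rank`` denotes the calling process's rank)::
>>> # xdoctest: +SKIP("need process group init")
>>> tensor = torch.zeros(1, dtype=torch.int64)
>>> scatter_list = [torch.tensor([i], dtype=torch.int64) for i in range(2)] if rank == 0 else None
>>> dist.scatter(tensor, scatter_list=scatter_list, src=0)
>>> tensor
tensor([0]) # Rank 0
tensor([1]) # Rank 1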
"""
_check_single_tensor(tensor, "tensor")
# Parameter ``scatter_list`` may be left unspecified on non-src ranks.
if scatter_list:
_check_tensor_list(scatter_list, "scatter_list")
else:
scatter_list = []
if _rank_not_in_group(group):
_warn_not_in_group("scatter")
return
scatter_list = [
t if not t.is_complex() else torch.view_as_real(t) for t in scatter_list
]
tensor = tensor if not tensor.is_complex() else torch.view_as_real(tensor)
my_rank = get_rank()
if src == my_rank:
if not scatter_list:
raise ValueError(
"Argument ``scatter_list`` must be specified " "on source rank."
)
input_tensors = [scatter_list]
output_tensors = [tensor]
else:
if scatter_list:
raise ValueError(
"Argument ``scatter_list`` must NOT be specified "
"on non-source ranks."
)
input_tensors = []
output_tensors = [tensor]
opts = ScatterOptions()
opts.rootRank = src
if group is None or group is GroupMember.WORLD:
default_pg = _get_default_group()
work = default_pg.scatter(output_tensors, input_tensors, opts)
else:
group_src_rank = _get_group_rank(group, src)
opts.rootRank = group_src_rank
work = group.scatter(output_tensors, input_tensors, opts)
if async_op:
return work
else:
work.wait()
def reduce_scatter_multigpu(
output_tensor_list, input_tensor_lists, op=ReduceOp.SUM, group=None, async_op=False
):
"""
Reduce and scatter a list of tensors to the whole group. Only the ``nccl``
backend is currently supported.
Each tensor in ``output_tensor_list`` should reside on a separate GPU, as
should each list of tensors in ``input_tensor_lists``.
Args:
output_tensor_list (List[Tensor]): Output tensors (on different GPUs)
to receive the result of the operation.
Note that ``len(output_tensor_list)`` needs to be the same for all
the distributed processes calling this function.
input_tensor_lists (List[List[Tensor]]): Input lists. It should
contain correctly-sized tensors on each GPU to be used for input of
the collective, e.g. ``input_tensor_lists[i]`` contains the
reduce_scatter input that resides on the GPU of
``output_tensor_list[i]``.
Note that each element of ``input_tensor_lists`` has the size of
``world_size * len(output_tensor_list)``, since the function
scatters the result from every single GPU in the group. To
interpret each element of ``input_tensor_lists[i]``, note that
``output_tensor_list[j]`` of rank k receives the reduce-scattered
result from ``input_tensor_lists[i][k * world_size + j]``
Also note that ``len(input_tensor_lists)``, and the size of each
element in ``input_tensor_lists`` (each element is a list,
therefore ``len(input_tensor_lists[i])``) need to be the same for
all the distributed processes calling this function.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op.
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group.
"""
if _rank_not_in_group(group):
_warn_not_in_group("reduce_scatter_multigpu")
return
opts = ReduceScatterOptions()
opts.reduceOp = op
if group is None:
default_pg = _get_default_group()
work = default_pg.reduce_scatter(output_tensor_list, input_tensor_lists, opts)
else:
work = group.reduce_scatter(output_tensor_list, input_tensor_lists, opts)
if async_op:
return work
else:
work.wait()
def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
"""
Reduces, then scatters a list of tensors to all processes in a group.
Args:
output (Tensor): Output tensor.
input_list (list[Tensor]): List of tensors to reduce and scatter.
op (optional): One of the values from
``torch.distributed.ReduceOp``
enum. Specifies an operation used for element-wise reductions.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op.
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group.
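Example (illustrative sketch; assumes a 2-rank default process group is
already initialized and ``rank`` denotes the calling process's rank)::
>>> # xdoctest: +SKIP("need process group init")
>>> input_list = [torch.tensor([rank + 1]), torch.tensor([(rank + 1) * 10])]
>>> output = torch.empty(1, dtype=torch.int64)
>>> dist.reduce_scatter(output, input_list, op=ReduceOp.SUM)
>>> output
tensor([3]) # Rank 0: 1 + 2
tensor([30]) # Rank 1: 10 + 20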
"""
_check_single_tensor(output, "output")
_check_tensor_list(input_list, "input_list")
if _rank_not_in_group(group):
_warn_not_in_group("reduce_scatter")
return
opts = ReduceScatterOptions()
opts.reduceOp = op
if group is None:
default_pg = _get_default_group()
work = default_pg.reduce_scatter([output], [input_list], opts)
else:
work = group.reduce_scatter([output], [input_list], opts)
if async_op:
return work
else:
work.wait()
def _reduce_scatter_base(output, input, op=ReduceOp.SUM, group=None, async_op=False):
"""
Reduces, then scatters a flattened tensor to all processes in a group.
Args:
output (Tensor): Output tensor.
input (Tensor): Input tensor whose size is the output tensor size times the world size.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op.
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group.
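Example (illustrative sketch; assumes a 2-rank default process group is
already initialized and ``rank`` denotes the calling process's rank)::
>>> # xdoctest: +SKIP("need process group init")
>>> input = torch.arange(4, dtype=torch.int64) + rank * 4
>>> input
tensor([0, 1, 2, 3]) # Rank 0
tensor([4, 5, 6, 7]) # Rank 1
>>> output = torch.empty(2, dtype=torch.int64)
>>> dist._reduce_scatter_base(output, input, op=ReduceOp.SUM)
>>> output
tensor([4, 6]) # Rank 0
tensor([8, 10]) # Rank 1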
"""
_check_single_tensor(output, "output")
_check_single_tensor(input, "input")
if _rank_not_in_group(group):
_warn_not_in_group("_reduce_scatter_base")
return
opts = ReduceScatterOptions()
opts.reduceOp = op
if group is None:
default_pg = _get_default_group()
work = default_pg._reduce_scatter_base(output, input, opts)
else:
work = group._reduce_scatter_base(output, input, opts)
if async_op:
return work
else:
work.wait()
def all_to_all_single(
output,
input,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False,
):
"""
Each process splits the input tensor and then scatters the split list
to all processes in a group. Then it concatenates the received tensors from
all processes in the group and returns a single output tensor.
Complex tensors are supported.
Args:
output (Tensor): Gathered concatenated output tensor.
input (Tensor): Input tensor to scatter.
output_split_sizes: (list[Int], optional): Output split sizes for dim 0.
If ``None`` or empty, dim 0 of the ``output`` tensor must be evenly
divisible by ``world_size``.
input_split_sizes: (list[Int], optional): Input split sizes for dim 0.
If ``None`` or empty, dim 0 of the ``input`` tensor must be evenly
divisible by ``world_size``.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op.
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group.
.. warning::
`all_to_all_single` is experimental and subject to change.
Examples:
>>> # xdoctest: +SKIP("Undefined rank")
>>> input = torch.arange(4) + rank * 4
>>> input
tensor([0, 1, 2, 3]) # Rank 0
tensor([4, 5, 6, 7]) # Rank 1
tensor([8, 9, 10, 11]) # Rank 2
tensor([12, 13, 14, 15]) # Rank 3
>>> output = torch.empty([4], dtype=torch.int64)
>>> dist.all_to_all_single(output, input)
>>> output
tensor([0, 4, 8, 12]) # Rank 0
tensor([1, 5, 9, 13]) # Rank 1
tensor([2, 6, 10, 14]) # Rank 2
tensor([3, 7, 11, 15]) # Rank 3
>>> # Essentially, it is similar to following operation:
>>> scatter_list = list(input.chunk(world_size))
>>> gather_list = list(output.chunk(world_size))
>>> for i in range(world_size):
>>> dist.scatter(gather_list[i], scatter_list if i == rank else [], src = i)
>>> # Another example with uneven split
>>> input
tensor([0, 1, 2, 3, 4, 5]) # Rank 0
tensor([10, 11, 12, 13, 14, 15, 16, 17, 18]) # Rank 1
tensor([20, 21, 22, 23, 24]) # Rank 2
tensor([30, 31, 32, 33, 34, 35, 36]) # Rank 3
>>> input_splits
[2, 2, 1, 1] # Rank 0
[3, 2, 2, 2] # Rank 1
[2, 1, 1, 1] # Rank 2
[2, 2, 2, 1] # Rank 3
>>> output_splits
[2, 3, 2, 2] # Rank 0
[2, 2, 1, 2] # Rank 1
[1, 2, 1, 2] # Rank 2
[1, 2, 1, 1] # Rank 3
>>> output = ...
>>> dist.all_to_all_single(output, input, output_splits, input_splits)
>>> output
tensor([ 0, 1, 10, 11, 12, 20, 21, 30, 31]) # Rank 0
tensor([ 2, 3, 13, 14, 22, 32, 33]) # Rank 1
tensor([ 4, 15, 16, 23, 34, 35]) # Rank 2
tensor([ 5, 17, 18, 24, 36]) # Rank 3
>>> # Another example with tensors of torch.cfloat type.
>>> input = torch.tensor([1+1j, 2+2j, 3+3j, 4+4j], dtype=torch.cfloat) + 4 * rank * (1+1j)
>>> input
tensor([1+1j, 2+2j, 3+3j, 4+4j]) # Rank 0
tensor([5+5j, 6+6j, 7+7j, 8+8j]) # Rank 1
tensor([9+9j, 10+10j, 11+11j, 12+12j]) # Rank 2
tensor([13+13j, 14+14j, 15+15j, 16+16j]) # Rank 3
>>> output = torch.empty([4], dtype=torch.cfloat)
>>> dist.all_to_all_single(output, input)
>>> output
tensor([1+1j, 5+5j, 9+9j, 13+13j]) # Rank 0
tensor([2+2j, 6+6j, 10+10j, 14+14j]) # Rank 1
tensor([3+3j, 7+7j, 11+11j, 15+15j]) # Rank 2
tensor([4+4j, 8+8j, 12+12j, 16+16j]) # Rank 3
"""
if _rank_not_in_group(group):
_warn_not_in_group("all_to_all_single")
return
opts = AllToAllOptions()
_check_single_tensor(output, "output")
_check_single_tensor(input, "input")
if input.is_complex():
input = torch.view_as_real(input)
if output.is_complex():
output = torch.view_as_real(output)
output_split_sizes = [] if output_split_sizes is None else output_split_sizes
input_split_sizes = [] if input_split_sizes is None else input_split_sizes
if group is None:
default_pg = _get_default_group()
work = default_pg.alltoall_base(
output, input, output_split_sizes, input_split_sizes, opts
)
else:
work = group.alltoall_base(
output, input, output_split_sizes, input_split_sizes, opts
)
if async_op:
return work
else:
work.wait()
def all_to_all(output_tensor_list, input_tensor_list, group=None, async_op=False):
"""
Each process scatters a list of input tensors to all processes in a group
and returns a gathered list of tensors in the output list.
Complex tensors are supported.
Args:
output_tensor_list (list[Tensor]): List of tensors to be gathered one
per rank.
input_tensor_list (list[Tensor]): List of tensors to scatter one per rank.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op.
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group.
.. warning::
`all_to_all` is experimental and subject to change.
Examples:
>>> # xdoctest: +SKIP("Undefined rank")
>>> input = torch.arange(4) + rank * 4
>>> input = list(input.chunk(4))
>>> input
[tensor([0]), tensor([1]), tensor([2]), tensor([3])] # Rank 0
[tensor([4]), tensor([5]), tensor([6]), tensor([7])] # Rank 1
[tensor([8]), tensor([9]), tensor([10]), tensor([11])] # Rank 2
[tensor([12]), tensor([13]), tensor([14]), tensor([15])] # Rank 3
>>> output = list(torch.empty([4], dtype=torch.int64).chunk(4))
>>> dist.all_to_all(output, input)
>>> output
[tensor([0]), tensor([4]), tensor([8]), tensor([12])] # Rank 0
[tensor([1]), tensor([5]), tensor([9]), tensor([13])] # Rank 1
[tensor([2]), tensor([6]), tensor([10]), tensor([14])] # Rank 2
[tensor([3]), tensor([7]), tensor([11]), tensor([15])] # Rank 3
>>> # Essentially, it is similar to following operation:
>>> scatter_list = input
>>> gather_list = output
>>> for i in range(world_size):
>>> dist.scatter(gather_list[i], scatter_list if i == rank else [], src = i)
>>> # Another example with uneven split
>>> input
tensor([0, 1, 2, 3, 4, 5]) # Rank 0
tensor([10, 11, 12, 13, 14, 15, 16, 17, 18]) # Rank 1
tensor([20, 21, 22, 23, 24]) # Rank 2
tensor([30, 31, 32, 33, 34, 35, 36]) # Rank 3
>>> input_splits
[2, 2, 1, 1] # Rank 0
[3, 2, 2, 2] # Rank 1
[2, 1, 1, 1] # Rank 2
[2, 2, 2, 1] # Rank 3
>>> output_splits
[2, 3, 2, 2] # Rank 0
[2, 2, 1, 2] # Rank 1
[1, 2, 1, 2] # Rank 2
[1, 2, 1, 1] # Rank 3
>>> input = list(input.split(input_splits))
>>> input
[tensor([0, 1]), tensor([2, 3]), tensor([4]), tensor([5])] # Rank 0
[tensor([10, 11, 12]), tensor([13, 14]), tensor([15, 16]), tensor([17, 18])] # Rank 1
[tensor([20, 21]), tensor([22]), tensor([23]), tensor([24])] # Rank 2
[tensor([30, 31]), tensor([32, 33]), tensor([34, 35]), tensor([36])] # Rank 3
>>> output = ...
>>> dist.all_to_all(output, input)
>>> output
[tensor([0, 1]), tensor([10, 11, 12]), tensor([20, 21]), tensor([30, 31])] # Rank 0
[tensor([2, 3]), tensor([13, 14]), tensor([22]), tensor([32, 33])] # Rank 1
[tensor([4]), tensor([15, 16]), tensor([23]), tensor([34, 35])] # Rank 2
[tensor([5]), tensor([17, 18]), tensor([24]), tensor([36])] # Rank 3
>>> # Another example with tensors of torch.cfloat type.
>>> input = torch.tensor([1+1j, 2+2j, 3+3j, 4+4j], dtype=torch.cfloat) + 4 * rank * (1+1j)
>>> input = list(input.chunk(4))
>>> input
[tensor([1+1j]), tensor([2+2j]), tensor([3+3j]), tensor([4+4j])] # Rank 0
[tensor([5+5j]), tensor([6+6j]), tensor([7+7j]), tensor([8+8j])] # Rank 1
[tensor([9+9j]), tensor([10+10j]), tensor([11+11j]), tensor([12+12j])] # Rank 2
[tensor([13+13j]), tensor([14+14j]), tensor([15+15j]), tensor([16+16j])] # Rank 3
>>> output = list(torch.empty([4], dtype=torch.cfloat).chunk(4))
>>> dist.all_to_all(output, input)
>>> output
[tensor([1+1j]), tensor([5+5j]), tensor([9+9j]), tensor([13+13j])] # Rank 0
[tensor([2+2j]), tensor([6+6j]), tensor([10+10j]), tensor([14+14j])] # Rank 1
[tensor([3+3j]), tensor([7+7j]), tensor([11+11j]), tensor([15+15j])] # Rank 2
[tensor([4+4j]), tensor([8+8j]), tensor([12+12j]), tensor([16+16j])] # Rank 3
"""
if _rank_not_in_group(group):
_warn_not_in_group("all_to_all")
return
opts = AllToAllOptions()
_check_tensor_list(output_tensor_list, "output_tensor_list")
_check_tensor_list(input_tensor_list, "input_tensor_list")
input_tensor_list = [
t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list
]
output_tensor_list = [
t if not t.is_complex() else torch.view_as_real(t) for t in output_tensor_list
]
if group is None:
default_pg = _get_default_group()
work = default_pg.alltoall(output_tensor_list, input_tensor_list, opts)
else:
work = group.alltoall(output_tensor_list, input_tensor_list, opts)
if async_op:
return work
else:
work.wait()
def barrier(group=GroupMember.WORLD, async_op=False, device_ids=None):
"""
Synchronizes all processes.
This collective blocks processes until the whole group enters this function,
if async_op is False, or until wait() is called on the async work handle.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
device_ids ([int], optional): List of device/GPU ids.
Valid only for NCCL backend.
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
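Example (illustrative sketch; assumes a default process group is already
initialized)::
>>> # xdoctest: +SKIP("need process group init")
>>> # No rank proceeds past this call until every rank in the group has
>>> # reached it.
>>> dist.barrier()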
"""
if _rank_not_in_group(group):
_warn_not_in_group("barrier")
return
opts = BarrierOptions()
if device_ids is not None:
if get_backend(group) != Backend.NCCL:
raise RuntimeError(
"Function argument device_ids not supported "
"for the selected backend {}".format(get_backend(group))
)
if isinstance(device_ids, list):
opts.device_ids = device_ids
else:
raise RuntimeError(
"Invalid function argument: " "device_ids type should be List[int]"
)
if group is None:
default_pg = _get_default_group()
work = default_pg.barrier(opts=opts)
else:
work = group.barrier(opts=opts)
if async_op:
return work
else:
work.wait()
def monitored_barrier(group=GroupMember.WORLD, timeout=None, wait_all_ranks=False):
"""
Synchronizes all processes similar to ``torch.distributed.barrier``, but takes
a configurable timeout and is able to report ranks that did not pass this
barrier within that timeout. Specifically, for non-zero ranks, it will block
until a send/recv is processed from rank 0. Rank 0 will block until all
send/recv from other ranks are processed, and will report failures for ranks
that failed to respond in time. Note that if one rank does not reach the
monitored_barrier (for example due to a hang), all other ranks would fail
in monitored_barrier.
This collective will block all processes/ranks in the group, until the
whole group exits the function successfully, making it useful for debugging
and synchronizing. However, it can have a performance impact and should only
be used for debugging or scenarios that require full synchronization points
on the host-side. For debugging purposes, this barrier can be inserted
before the application's collective calls to check if any ranks are
desynchronized.
.. note:: Note that this collective is only supported with the GLOO backend.
Args:
group (ProcessGroup, optional): The process group to work on. If
``None``, the default process group will be used.
timeout (datetime.timedelta, optional): Timeout for monitored_barrier.
If ``None``, the default process group timeout will be used.
wait_all_ranks (bool, optional): Whether to collect all failed ranks or
not. By default, this is ``False`` and ``monitored_barrier`` on rank 0
will throw on the first failed rank it encounters in order to fail
fast. By setting ``wait_all_ranks=True`` ``monitored_barrier`` will
collect all failed ranks and throw an error containing information
about all failed ranks.
Returns:
``None``.
Example::
>>> # xdoctest: +SKIP("need process group init")
>>> # Note: Process group initialization omitted on each rank.
>>> import torch.distributed as dist
>>> if dist.get_rank() != 1:
>>> dist.monitored_barrier() # Raises exception indicating that
>>> # rank 1 did not call into monitored_barrier.
>>> # Example with wait_all_ranks=True
>>> if dist.get_rank() == 0:
>>> dist.monitored_barrier(wait_all_ranks=True) # Raises exception
>>> # indicating that ranks 1, 2, ... world_size - 1 did not call into
>>> # monitored_barrier.
"""
# Need to call rank not in group before using the group, otherwise
# "Invalid process group" error is raised.
if _rank_not_in_group(group):
_warn_not_in_group("monitored_barrier")
return
if get_backend(group) != Backend.GLOO:
raise RuntimeError("monitored_barrier is only implemented for GLOO backend.")
if timeout is None:
timeout = default_pg_timeout
group_to_use = _get_default_group() if group is None else group
return group_to_use.monitored_barrier(timeout, wait_all_ranks=wait_all_ranks)
def _create_process_group_wrapper(
wrapped_pg: ProcessGroup,
store_prefix: str,
store: Store,
rank: int,
world_size: int,
timeout: timedelta = default_pg_timeout,
):
# Create a separate prefix store for the helper process group.
prefix = f"{PG_WRAPPER_STORE_PREFIX}:{store_prefix}"
store = PrefixStore(prefix, store)
helper_pg = ProcessGroupGloo(store, rank, world_size, timeout=timeout)
# Wrap the underlying pg with ProcessGroupWrapper.
wrapped_pg = _ProcessGroupWrapper(wrapped_pg, helper_pg)
return wrapped_pg
def new_group(ranks=None, timeout=default_pg_timeout, backend=None, pg_options=None):
"""
Creates a new distributed group.
This function requires that all processes in the main group (i.e. all
processes that are part of the distributed job) enter this function, even
if they are not going to be members of the group. Additionally, groups
should be created in the same order in all processes.
.. warning::
Using multiple process groups with the ``NCCL`` backend concurrently
is not safe and the user should perform explicit synchronization in
their application to ensure only one process group is used at a time.
This means collectives from one process group should have completed
execution on the device (not just enqueued since CUDA execution is
async) before collectives from another process group are enqueued.
See `Using multiple NCCL communicators concurrently
<https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/communicators.html#using-multiple-nccl-communicators-concurrently>`_
for more details.
Args:
ranks (list[int]): List of ranks of group members. If ``None``, will be
set to all ranks. Default is ``None``.
timeout (timedelta, optional): Timeout for operations executed against
the process group. Default value equals 30 minutes.
This is applicable for the ``gloo`` backend. For ``nccl``, this is
applicable only if the environment variable ``NCCL_BLOCKING_WAIT``
or ``NCCL_ASYNC_ERROR_HANDLING`` is set to 1. When
``NCCL_BLOCKING_WAIT`` is set, this is the duration for which the
process will block and wait for collectives to complete before
throwing an exception. When ``NCCL_ASYNC_ERROR_HANDLING`` is set,
this is the duration after which collectives will be aborted
asynchronously and the process will crash. ``NCCL_BLOCKING_WAIT``
will provide errors to the user which can be caught and handled,
but due to its blocking nature, it has a performance overhead. On
the other hand, ``NCCL_ASYNC_ERROR_HANDLING`` has very little
performance overhead, but crashes the process on errors. This is
done since CUDA execution is async and it is no longer safe to
continue executing user code since failed async NCCL operations
might result in subsequent CUDA operations running on corrupted
data. Only one of these two environment variables should be set.
backend (str or Backend, optional): The backend to use. Depending on
build-time configurations, valid values are ``gloo`` and ``nccl``.
By default uses the same backend as the global group. This field
should be given as a lowercase string (e.g., ``"gloo"``), which can
also be accessed via :class:`Backend` attributes (e.g.,
``Backend.GLOO``). If ``None`` is passed in, the backend
corresponding to the default process group will be used. Default is
``None``.
pg_options (ProcessGroupOptions, optional): process group options
specifying what additional options need to be passed in during
the construction of specific process groups. i.e. for the ``nccl``
backend, ``is_high_priority_stream`` can be specified so that
process group can pick up high priority cuda streams.
Returns:
A handle of distributed group that can be given to collective calls.
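    A minimal usage sketch is shown below. It is illustrative only: the ranks and
    tensor are placeholders, and an already-initialized default process group is
    assumed.
    Examples:
        >>> # xdoctest: +SKIP("need process group init")
        >>> # All ranks of the job must call new_group, even non-members.
        >>> group = dist.new_group(ranks=[0, 1])
        >>> if dist.get_rank() in [0, 1]:
        >>>     tensor = torch.ones(1)
        >>>     dist.all_reduce(tensor, group=group)  # tensor becomes 2 on ranks 0 and 1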
"""
global _pg_group_ranks
default_pg = _get_default_group()
default_backend, default_store = _pg_map[default_pg]
global_rank = default_pg.rank()
global_world_size = default_pg.size()
# Default to the same backend as the global process group
# if the backend is not specified.
if not backend:
backend = default_backend
# checks the input ranks
if ranks is not None:
ranks = sorted(ranks)
group_world_size = len(ranks)
if group_world_size > global_world_size:
raise RuntimeError(
"the new group's world size should be less or "
"equal to the world size set by "
"init_process_group"
)
# check ranks' sanity
for rank in ranks:
if rank < 0 or rank >= global_world_size:
raise RuntimeError(
"The new group's rank should be within the "
"the world_size set by init_process_group"
)
if global_rank in ranks:
group_rank = ranks.index(global_rank)
else:
group_rank = None
else:
ranks = list(range(global_world_size))
group_world_size = global_world_size
group_rank = global_rank
backend = Backend(backend)
pg = _new_process_group_helper(
group_world_size,
group_rank,
ranks,
backend,
default_store,
pg_options=pg_options,
timeout=timeout,
)
# Create the global rank to group rank mapping
_pg_group_ranks[pg] = {
global_rank: group_rank for group_rank, global_rank in enumerate(ranks)
}
# barrier at the end to ensure that once we return from this method, all
# process groups including global variables are updated correctly on all
# ranks.
if backend == Backend.MPI:
# MPI doesn't have store.
barrier()
else:
# Use store based barrier here since barrier() used a bunch of
# default devices and messes up NCCL internal state.
_store_based_barrier(global_rank, default_store, timeout)
# Set sequence numbers for gloo and nccl process groups.
if pg != GroupMember.NON_GROUP_MEMBER and get_backend(pg) in [
Backend.GLOO,
Backend.NCCL,
]:
pg._set_sequence_number_for_group()
return pg
def new_subgroups(
group_size=None,
group=None,
timeout=default_pg_timeout,
backend=None,
pg_options=None,
):
"""
Creates GPU subgroups of equal size. By default, it creates intra-machine subgroups,
    each of which contains all the ranks of a machine, based on the assumption
that each machine has the same number of CUDA devices.
This is a convenience API that calls ``new_group`` to generate multiple subgroups.
It requires that all processes in the main group (i.e. all
processes that are part of the distributed job) enter this function, even
if they are not going to be members of the group.
.. warning::
This API only works when CUDA is available.
.. warning::
If ``group_size`` is passed in, the world size must be divisible by ``group_size``.
If no ``group_size`` is passed in, and not all the machines have the same number
of devices, the subgroup division will be different across nodes and can cause
unexpected behaviors.
.. warning::
Using multiple process groups with the ``NCCL`` backend concurrently
is not safe and the user should perform explicit synchronization in
their application to ensure only one process group is used at a time.
This means collectives from one process group should have completed
execution on the device (not just enqueued since CUDA execution is
async) before collectives from another process group are enqueued.
See `Using multiple NCCL communicators concurrently <https://docs.nvid
ia.com/deeplearning/nccl/user-guide/docs/usage/communicators.html#using
-multiple-nccl-communicators-concurrently>`_ for more details.
Args:
group_size (int, optional): The size of each subgroup. If ``None``,
the default subgroup size is equal to the number of devices on each machine,
based on the assumption that each machine has exactly the same
number of devices. Default is ``None``.
timeout (timedelta, optional): Timeout for operations executed against
the process group. Default value equals 30 minutes.
This is applicable for the ``gloo`` backend. For ``nccl``, this is
applicable only if the environment variable ``NCCL_BLOCKING_WAIT``
or ``NCCL_ASYNC_ERROR_HANDLING`` is set to 1. When
``NCCL_BLOCKING_WAIT`` is set, this is the duration for which the
process will block and wait for collectives to complete before
throwing an exception. When ``NCCL_ASYNC_ERROR_HANDLING`` is set,
this is the duration after which collectives will be aborted
asynchronously and the process will crash. ``NCCL_BLOCKING_WAIT``
will provide errors to the user which can be caught and handled,
but due to its blocking nature, it has a performance overhead. On
the other hand, ``NCCL_ASYNC_ERROR_HANDLING`` has very little
performance overhead, but crashes the process on errors. This is
done since CUDA execution is async and it is no longer safe to
continue executing user code since failed async NCCL operations
might result in subsequent CUDA operations running on corrupted
data. Only one of these two environment variables should be set.
backend (str or Backend, optional): The backend to use. Depending on
build-time configurations, valid values are ``gloo`` and ``nccl``.
By default uses the same backend as the global group. This field
should be given as a lowercase string (e.g., ``"gloo"``), which can
also be accessed via :class:`Backend` attributes (e.g.,
``Backend.GLOO``). If ``None`` is passed in, the backend
corresponding to the default process group will be used. Default is
``None``.
pg_options (ProcessGroupOptions, optional): process group options
specifying what additional options need to be passed in during
the construction of specific process groups. i.e. for the ``nccl``
backend, ``is_high_priority_stream`` can be specified so that
process group can pick up high priority cuda streams.
Returns:
The subgroup containing the current rank, and all the subgroups used for cleanup.
Examples:
>>> # Create intra-machine subgroups.
>>> # xdoctest: +SKIP("need process group init")
>>> cur_subgroup, subgroups = dist.new_subgroups()
>>> # Allreduce within the machine.
>>> rank = dist.get_rank()
        >>> tensor = torch.ones(1, device=rank)
>>> dist.all_reduce(tensor, group=cur_subgroup)
>>> tensor
tensor([8]) # Assume 8 is the number of CUDA devices per machine.
>>> # Cleanup.
>>> for subgroup in subgroups:
>>> dist.destroy_process_group(subgroup)
"""
if not torch.cuda.is_available():
raise ValueError("Subgroups can only be created when CUDA is available")
if group_size is None:
group_size = torch.cuda.device_count()
world_size = get_world_size()
if world_size < group_size:
raise ValueError("The arg 'group_size' must not exceed the world size")
if world_size % group_size != 0:
raise ValueError("The world size must be divisible by 'group_size'")
subgroups = []
cur_subgroup = None
for subgroup_id in range(world_size // group_size):
start_rank = subgroup_id * group_size
end_rank = start_rank + group_size
ranks_in_subgroup = list(range(start_rank, end_rank))
subgroup = new_group(
ranks=ranks_in_subgroup,
timeout=timeout,
backend=backend,
pg_options=pg_options,
)
subgroups.append(subgroup)
rank = get_rank()
if rank in ranks_in_subgroup:
cur_subgroup = subgroup
logger.info(
"Rank {} is assigned to subgroup {}".format(rank, ranks_in_subgroup)
)
return cur_subgroup, subgroups
def new_subgroups_by_enumeration(
ranks_per_subgroup_list,
timeout=default_pg_timeout,
backend=None,
pg_options=None,
):
"""
Creates GPU subgroups by dividing the global world, where the division is specified by
    a nested list of ranks. The subgroups cannot overlap, and some ranks need not
    be in any subgroup.
This is a convenience API that calls ``new_group`` to generate multiple subgroups.
It requires that all processes in the main group (i.e. all
processes that are part of the distributed job) enter this function, even
if they are not going to be members of the group.
.. warning::
Using multiple process groups with the ``NCCL`` backend concurrently
is not safe and the user should perform explicit synchronization in
their application to ensure only one process group is used at a time.
This means collectives from one process group should have completed
execution on the device (not just enqueued since CUDA execution is
async) before collectives from another process group are enqueued.
See `Using multiple NCCL communicators concurrently <https://docs.nvid
ia.com/deeplearning/nccl/user-guide/docs/usage/communicators.html#using
-multiple-nccl-communicators-concurrently>`_ for more details.
Args:
ranks_per_subgroup_list (list[list[int]]): A nested list of ranks of
group members.
timeout (timedelta, optional): Timeout for operations executed against
the process group. Default value equals 30 minutes.
This is applicable for the ``gloo`` backend. For ``nccl``, this is
applicable only if the environment variable ``NCCL_BLOCKING_WAIT``
or ``NCCL_ASYNC_ERROR_HANDLING`` is set to 1. When
``NCCL_BLOCKING_WAIT`` is set, this is the duration for which the
process will block and wait for collectives to complete before
throwing an exception. When ``NCCL_ASYNC_ERROR_HANDLING`` is set,
this is the duration after which collectives will be aborted
asynchronously and the process will crash. ``NCCL_BLOCKING_WAIT``
will provide errors to the user which can be caught and handled,
but due to its blocking nature, it has a performance overhead. On
the other hand, ``NCCL_ASYNC_ERROR_HANDLING`` has very little
performance overhead, but crashes the process on errors. This is
done since CUDA execution is async and it is no longer safe to
continue executing user code since failed async NCCL operations
might result in subsequent CUDA operations running on corrupted
data. Only one of these two environment variables should be set.
backend (str or Backend, optional): The backend to use. Depending on
build-time configurations, valid values are ``gloo`` and ``nccl``.
By default uses the same backend as the global group. This field
should be given as a lowercase string (e.g., ``"gloo"``), which can
also be accessed via :class:`Backend` attributes (e.g.,
``Backend.GLOO``). If ``None`` is passed in, the backend
corresponding to the default process group will be used. Default is
``None``.
pg_options (ProcessGroupOptions, optional): process group options
specifying what additional options need to be passed in during
the construction of specific process groups. i.e. for the ``nccl``
backend, ``is_high_priority_stream`` can be specified so that
process group can pick up high priority cuda streams.
Returns:
The subgroup containing the current rank, and all the subgroups used for cleanup.
Examples:
>>> # Create two subgroups, where each has 2 processes.
>>> # xdoctest: +SKIP("need process group init")
        >>> cur_subgroup, subgroups = dist.new_subgroups_by_enumeration(
        >>>     ranks_per_subgroup_list=[[0, 2], [1, 3]])
>>> rank = dist.get_rank()
>>> tensor = torch.ones(1, device=rank) * rank
>>> dist.all_reduce(tensor, group=cur_subgroup)
>>> tensor
tensor([2]) # Subgroup 0: ranks 0 and 2
tensor([4]) # Subgroup 1: ranks 1 and 3
"""
if not torch.cuda.is_available():
raise ValueError("Subgroups can only be created when CUDA is available")
if ranks_per_subgroup_list is None or len(ranks_per_subgroup_list) == 0:
raise ValueError("The arg 'ranks_per_subgroup_list' cannot be empty")
world_size = get_world_size()
subgroups = []
cur_subgroup = None
# Create a mapping from rank to subgroup to check if there is any subgroup overlap.
rank_to_ranks_dict = {} # type: ignore[var-annotated]
for ranks in ranks_per_subgroup_list:
subgroup = new_group(
ranks=ranks,
timeout=timeout,
backend=backend,
pg_options=pg_options,
)
subgroups.append(subgroup)
my_rank = get_rank()
for rank in ranks:
if rank in rank_to_ranks_dict:
raise ValueError(
"Rank {} has appeared in both subgroup {} and {}".format(
rank, rank_to_ranks_dict[rank], ranks
)
)
rank_to_ranks_dict[rank] = ranks
if my_rank == rank:
cur_subgroup = subgroup
logger.info("Rank {} is assigned to subgroup {}".format(rank, ranks))
return cur_subgroup, subgroups
| pytorch-master | torch/distributed/distributed_c10d.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from argparse import Action
class env(Action):
"""
Gets argument values from ``PET_{dest}`` before defaulting
to the given ``default`` value. For flags (e.g. ``--standalone``)
use ``check_env`` instead.
.. note:: when multiple option strings are specified, ``dest`` is
the longest option string (e.g. for ``"-f", "--foo"``
the env var to set is ``PET_FOO`` not ``PET_F``)
Example:
::
parser.add_argument("-f", "--foo", action=env, default="bar")
./program -> args.foo="bar"
./program -f baz -> args.foo="baz"
./program --foo baz -> args.foo="baz"
PET_FOO="env_bar" ./program -f baz -> args.foo="baz"
PET_FOO="env_bar" ./program --foo baz -> args.foo="baz"
PET_FOO="env_bar" ./program -> args.foo="env_bar"
parser.add_argument("-f", "--foo", action=env, required=True)
./program -> fails
./program -f baz -> args.foo="baz"
PET_FOO="env_bar" ./program -> args.foo="env_bar"
PET_FOO="env_bar" ./program -f baz -> args.foo="baz"
"""
def __init__(self, dest, default=None, required=False, **kwargs) -> None:
env_name = f"PET_{dest.upper()}"
default = os.environ.get(env_name, default)
        # ``required`` means that the option NEEDS to be present in the command-line
        # args (rather than "this option requires a value, either set explicitly or by
        # default"), so if a default was found (e.g. from the env var) we no longer
        # "require" it on the command line and set it to False.
if default:
required = False
super().__init__(dest=dest, default=default, required=required, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class check_env(Action):
"""
For flags, checks whether the env var ``PET_{dest}`` exists
before defaulting to the given ``default`` value. Equivalent to
``store_true`` argparse built-in action except that the argument can
be omitted from the commandline if the env var is present and has a
non-zero value.
.. note:: it is redundant to pass ``default=True`` for arguments
that use this action because a flag should be ``True``
when present and ``False`` otherwise.
Example:
::
parser.add_argument("--verbose", action=check_env)
./program -> args.verbose=False
./program --verbose -> args.verbose=True
PET_VERBOSE=1 ./program -> args.verbose=True
PET_VERBOSE=0 ./program -> args.verbose=False
PET_VERBOSE=0 ./program --verbose -> args.verbose=True
Anti-pattern (don't do this):
::
parser.add_argument("--verbose", action=check_env, default=True)
./program -> args.verbose=True
./program --verbose -> args.verbose=True
PET_VERBOSE=1 ./program -> args.verbose=True
PET_VERBOSE=0 ./program -> args.verbose=False
"""
def __init__(self, dest, default=False, **kwargs) -> None:
env_name = f"PET_{dest.upper()}"
default = bool(int(os.environ.get(env_name, "1" if default else "0")))
super().__init__(dest=dest, const=True, default=default, nargs=0, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
| pytorch-master | torch/distributed/argparse_util.py |
from torch._C._distributed_c10d import _DEFAULT_PG_TIMEOUT
# Default process group wide timeout, if applicable.
# This only applies to the gloo and nccl backends
# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
# To make an attempt at backwards compatibility with THD, we use an
# extraordinarily high default timeout, given that THD did not have timeouts.
default_pg_timeout = _DEFAULT_PG_TIMEOUT
| pytorch-master | torch/distributed/constants.py |
import os
import sys
from enum import Enum
import torch
def is_available() -> bool:
"""
Returns ``True`` if the distributed package is available. Otherwise,
``torch.distributed`` does not expose any other APIs. Currently,
``torch.distributed`` is available on Linux, MacOS and Windows. Set
``USE_DISTRIBUTED=1`` to enable it when building PyTorch from source.
Currently, the default value is ``USE_DISTRIBUTED=1`` for Linux and Windows,
``USE_DISTRIBUTED=0`` for MacOS.
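    A tiny usage sketch (the guard pattern shown is merely illustrative):
    Example::
        >>> if torch.distributed.is_available():
        >>>     print("the distributed package can be used on this build")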
"""
return hasattr(torch._C, "_c10d_init")
if is_available() and not torch._C._c10d_init():
raise RuntimeError("Failed to initialize torch.distributed")
if is_available():
from torch._C._distributed_c10d import (
Store,
FileStore,
TCPStore,
ProcessGroup,
PrefixStore,
Reducer,
Logger,
BuiltinCommHookType,
GradBucket,
Work as _Work,
_DEFAULT_FIRST_BUCKET_BYTES,
_register_comm_hook,
_register_builtin_comm_hook,
_broadcast_coalesced,
_compute_bucket_assignment_by_size,
_verify_params_across_processes,
_test_python_store,
DebugLevel,
get_debug_level,
set_debug_level,
set_debug_level_from_env,
)
if sys.platform != "win32":
from torch._C._distributed_c10d import (
HashStore,
_round_robin_process_groups,
)
from .distributed_c10d import * # noqa: F403
# Variables prefixed with underscore are not auto imported
# See the comment in `distributed_c10d.py` above `_backend` on why we expose
# this.
from .distributed_c10d import (
_backend,
_all_gather_base,
_reduce_scatter_base,
_create_process_group_wrapper,
_rank_not_in_group,
)
from .rendezvous import (
_create_store_from_options,
)
from .remote_device import _remote_device
set_debug_level_from_env()
| pytorch-master | torch/distributed/__init__.py |
r"""
``torch.distributed.launch`` is a module that spawns up multiple distributed
training processes on each of the training nodes.
.. warning::
This module is going to be deprecated in favor of :ref:`torchrun <launcher-api>`.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. The utility can be used for either
CPU training or GPU training. If the utility is used for GPU training,
each distributed process will be operating on a single GPU. This can achieve
well-improved single-node training performance. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
This will especially be beneficial for systems with multiple Infiniband
interfaces that have direct-GPU support, since all of them can be utilized for
aggregated communication bandwidth.
In both cases of single-node distributed training or multi-node distributed
training, this utility will launch the given number of processes per node
(``--nproc_per_node``). If used for GPU training, this number needs to be less
than or equal to the number of GPUs on the current system (``nproc_per_node``),
and each process will be operating on a single GPU from *GPU 0 to
GPU (nproc_per_node - 1)*.
**How to use this module:**
1. Single-Node multi-process distributed training
::
python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
arguments of your training script)
2. Multi-Node multi-process distributed training: (e.g. two nodes)
Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
::
python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=0 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
Node 2:
::
python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
--nnodes=2 --node_rank=1 --master_addr="192.168.1.1"
--master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
python -m torch.distributed.launch --help
**Important Notices:**
1. This utility and multi-process distributed (single-node or
multi-node) GPU training currently only achieves the best performance using
the NCCL distributed backend. Thus NCCL backend is the recommended backend to
use for GPU training.
2. In your training program, you must parse the command-line argument:
``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by this module.
If your training program uses GPUs, you should ensure that your code only
runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
Parsing the local_rank argument
::
>>> # xdoctest: +SKIP
>>> import argparse
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument("--local_rank", type=int)
>>> args = parser.parse_args()
Set your device to local rank using either
::
>>> torch.cuda.set_device(args.local_rank) # before your code runs
or
::
>>> with torch.cuda.device(args.local_rank):
>>> # your code to run
>>> ...
3. In your training program, you are supposed to call the following function
at the beginning to start the distributed backend. It is strongly recommended
that ``init_method=env://``. Other init methods (e.g. ``tcp://``) may work,
but ``env://`` is the one that is officially supported by this module.
::
>>> torch.distributed.init_process_group(backend='YOUR BACKEND',
>>> init_method='env://')
4. In your training program, you can either use regular distributed functions
or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
training program uses GPUs for training and you would like to use
:func:`torch.nn.parallel.DistributedDataParallel` module,
here is how to configure it.
::
>>> model = torch.nn.parallel.DistributedDataParallel(model,
>>> device_ids=[args.local_rank],
>>> output_device=args.local_rank)
Please ensure that ``device_ids`` argument is set to be the only GPU device id
that your code will be operating on. This is generally the local rank of the
process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``,
and ``output_device`` needs to be ``args.local_rank`` in order to use this
utility.
5. Another way to pass ``local_rank`` to the subprocesses is via the environment
variable ``LOCAL_RANK``. This behavior is enabled when you launch the script with
``--use_env=True``. You must adjust the subprocess example above to replace
``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher
will not pass ``--local_rank`` when you specify this flag.
.. warning::
``local_rank`` is NOT globally unique: it is only unique per process
on a machine. Thus, don't use it to decide if you should, e.g.,
write to a networked filesystem. See
https://github.com/pytorch/pytorch/issues/12042 for an example of
how things can go wrong if you don't do this correctly.
"""
import logging
import warnings
from torch.distributed.run import get_args_parser, run
logger = logging.getLogger(__name__)
def parse_args(args):
parser = get_args_parser()
parser.add_argument(
"--use_env",
default=False,
action="store_true",
help="Use environment variable to pass "
"'local rank'. For legacy reasons, the default value is False. "
"If set to True, the script will not pass "
"--local_rank as argument, and will instead set LOCAL_RANK.",
)
return parser.parse_args(args)
def launch(args):
if args.no_python and not args.use_env:
raise ValueError(
"When using the '--no_python' flag,"
" you must also set the '--use_env' flag."
)
run(args)
def main(args=None):
warnings.warn(
"The module torch.distributed.launch is deprecated\n"
"and will be removed in future. Use torchrun.\n"
"Note that --use_env is set by default in torchrun.\n"
"If your script expects `--local_rank` argument to be set, please\n"
"change it to read from `os.environ['LOCAL_RANK']` instead. See \n"
"https://pytorch.org/docs/stable/distributed.html#launch-utility for \n"
"further instructions\n",
FutureWarning,
)
args = parse_args(args)
launch(args)
if __name__ == "__main__":
main()
| pytorch-master | torch/distributed/launch.py |
import collections
import torch
import torch.distributed as dist
from torch.nn.parallel._functions import _get_stream
from torch.nn.parallel.scatter_gather import ( # type: ignore[attr-defined]
is_namedtuple as _is_namedtuple
)
from typing import Dict, Any, List
__all__ = [] # type: ignore[var-annotated]
def _recursive_to(inputs, target_gpu, use_side_stream_for_tensor_copies):
r"""
Recursively moves input to the target_gpu.
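    A behavioral sketch (hedged: the nested structure and device index 0 are
    illustrative, and a CUDA device is assumed):
    Example::
        >>> # xdoctest: +SKIP("requires CUDA")
        >>> inputs = ({"x": torch.zeros(2)}, [torch.ones(3)])
        >>> (moved,) = _recursive_to(inputs, 0, use_side_stream_for_tensor_copies=False)
        >>> moved[0]["x"].is_cuda and moved[1][0].is_cuda
        True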
"""
def to_map(obj):
if isinstance(obj, torch.Tensor):
if obj.device == torch.device("cuda", target_gpu):
return (obj,)
if not use_side_stream_for_tensor_copies:
return (obj.to(target_gpu),)
else:
# Perform CPU -> GPU copies in a background stream. This code is
# motivated from similar logic in torch/nn/parallel/_functions.py
stream = _get_stream(target_gpu)
with torch.cuda.stream(stream):
output = obj.to(target_gpu)
# synchronize with the copy stream
with torch.cuda.device(target_gpu):
current_stream = torch.cuda.current_stream()
# Sync the current stream with the copy stream
current_stream.wait_stream(stream)
# Ensure tensor memory is not reused until work on
# main stream is complete
output.record_stream(current_stream) # type: ignore[arg-type]
return (output,)
if _is_namedtuple(obj):
return [type(obj)(*args) for args in zip(*map(to_map, obj))]
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(to_map, obj)))
if isinstance(obj, str):
            # Strings need to be handled separately, otherwise they are treated as
            # sequences and recursed into infinitely (each element of a string is
            # itself a string, and so on).
return [obj]
if isinstance(obj, collections.abc.Sequence) and len(obj) > 0:
try:
return [type(obj)(i) for i in zip(*map(to_map, obj))] # type: ignore[call-arg]
except TypeError:
# The sequence type may not support `__init__(iterable)` (e.g., `range`).
return [list(i) for i in zip(*map(to_map, obj))]
if isinstance(obj, collections.abc.Mapping) and len(obj) > 0:
try:
return [type(obj)(i) for i in zip(*map(to_map, obj.items()))] # type: ignore[call-arg]
except TypeError:
# The mapping type may not support `__init__(iterable)`.
return [dict(i) for i in zip(*map(to_map, obj.items()))]
return [obj]
# Avoid reference cycle
try:
res = to_map(inputs)
finally:
to_map = None # type: ignore[assignment]
return res
def _to_kwargs(inputs, kwargs, device_id, use_side_stream_for_tensor_copies):
inputs = (
_recursive_to(inputs, device_id, use_side_stream_for_tensor_copies)
if inputs
else []
)
kwargs = (
_recursive_to(kwargs, device_id, use_side_stream_for_tensor_copies)
if kwargs
else []
)
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
def _verify_param_shape_across_processes(process_group, tensors, logger=None):
return dist._verify_params_across_processes(process_group, tensors, logger)
def _sync_module_states(
module,
process_group,
broadcast_bucket_size,
src,
params_and_buffers_to_ignore,
):
"""
    Syncs ``module``'s parameters and buffers so that all ranks contain
    the same module state. Note that this API assumes that all
parameter shapes are consistent before running the synchronization. This can
be checked with ``_verify_param_shape_across_processes``.
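    A minimal usage sketch (hedged: the module, bucket size, and ignore set are
    illustrative, and an initialized default process group is assumed):
    Example::
        >>> # xdoctest: +SKIP("need process group init")
        >>> module = torch.nn.Linear(4, 4)
        >>> _sync_module_states(
        >>>     module,
        >>>     process_group=dist.group.WORLD,
        >>>     broadcast_bucket_size=int(250 * 1024 * 1024),
        >>>     src=0,
        >>>     params_and_buffers_to_ignore=set(),
        >>> )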
"""
module_states = []
for name, param in module.named_parameters():
if name not in params_and_buffers_to_ignore:
module_states.append(param.detach())
for name, buffer in module.named_buffers():
if name not in params_and_buffers_to_ignore:
module_states.append(buffer.detach())
_sync_params_and_buffers(
process_group,
module_states,
broadcast_bucket_size,
src
)
def _sync_params_and_buffers(
process_group: dist.ProcessGroup,
module_states: List[torch.Tensor],
broadcast_bucket_size: int,
src: int,
):
"""
Synchronizes ``module_states`` (list of tensors) across all processes by
broadcasting them from rank 0.
"""
if len(module_states) > 0:
dist._broadcast_coalesced(
process_group, module_states, broadcast_bucket_size, src
)
def _replace_by_prefix(
state_dict: Dict[str, Any],
old_prefix: str,
new_prefix: str,
) -> None:
"""
Replace all keys that match a given old_prefix with a new_prefix (in-place).
Usage::
state_dict = {"layer.xyz": torch.tensor(1)}
replace_by_prefix_(state_dict, "layer.", "module.layer.")
assert state_dict == {"module.layer.xyz": torch.tensor(1)}
"""
if old_prefix == new_prefix:
raise ValueError("old_prefix and new_prefix must be distinct")
for key in list(state_dict.keys()):
if not key.startswith(old_prefix):
continue
new_key = new_prefix + key[len(old_prefix) :]
state_dict[new_key] = state_dict[key]
del state_dict[key]
| pytorch-master | torch/distributed/utils.py |
from typing import Optional, Union
import torch
class _remote_device(object):
"""
Represents a device on a remote worker.
Args:
remote_device (str or torch.device): Represents a device on a remote worker.
The string format should be one of the following:
1. "<workername>/<device>", where the device field can be parsed as torch.device type.
E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0".
In addition, the device field can be optional and the default value is "cpu".
2. "rank:<rank>/<device>", where <rank> is the rank of the
process and device can be parsed as torch.device type.
E.g., "rank:0/cpu", "rank:0", "rank:0/cuda:0"
            3. <workername> and <rank> are optional; formats like "cpu"
                and "cuda:1" just represent local devices.
"""
def __init__(self, remote_device: Union[str, torch.device]):
PARSE_ERROR = (
f"Could not parse remote_device: {remote_device}. The valid format is "
"'<workername>/<device>' or 'rank:<rank>/<device>' or '<device>'"
)
self._worker_name = None
self._rank = None
self._device: Optional[Union[str, int, torch.device]] = None
if isinstance(remote_device, torch.device):
self._device = remote_device
elif isinstance(remote_device, str):
fields = remote_device.split("/")
if len(fields) == 2:
self._worker_name, self._device = fields
elif len(fields) == 1:
# Check if this is a valid device.
if _remote_device._is_valid_local_device(fields[0]):
self._device = fields[0]
else:
self._worker_name = fields[0]
self._device = "cpu"
else:
raise ValueError(PARSE_ERROR)
else:
raise TypeError(f'Invalid type for remote_device: {type(remote_device)}')
# Do some basic sanity check (no empty string)
if self._worker_name is not None and not self._worker_name:
raise ValueError(PARSE_ERROR)
# Validate the device.
self._device = torch.device(self._device)
# Check for rank based format.
if self._worker_name is not None:
fields = self._worker_name.split(":")
if len(fields) == 2:
# rank:<rank>/device format, extract rank
if fields[0] == "rank" and fields[1].isdigit():
self._rank = int(fields[1]) # type: ignore[assignment]
self._worker_name = None
else:
raise ValueError(PARSE_ERROR)
elif len(fields) > 2:
raise ValueError(PARSE_ERROR)
@staticmethod
def _is_valid_local_device(device):
# Check for torch.device
try:
torch.device(device)
return True
except Exception:
return False
def worker_name(self) -> Optional[str]:
"""
Returns the name of remote worker representing the remote device.
Returns ``None`` if no worker name is available.
"""
return self._worker_name
def rank(self) -> Optional[int]:
"""
Returns the rank of remote worker representing the remote device.
Returns ``None`` if no rank is available.
"""
return self._rank
def device(self) -> torch.device:
"""
Returns the local device on the remote worker.
"""
return self._device # type: ignore[return-value]
def __repr__(self):
if self._device is not None:
if self._worker_name is not None:
return f'{self._worker_name}/{self._device}'
elif self._rank is not None:
return f'rank:{self._rank}/{self._device}'
else:
return str(self._device)
else:
if self._worker_name is not None:
return f'{self._worker_name}'
elif self._rank is not None:
return f'{self._rank}'
else:
raise RuntimeError('Invalid state!')
def __eq__(self, other):
if not isinstance(other, _remote_device):
return False
if (
self._worker_name == other._worker_name
and self._device == other._device
and self._rank == other._rank
):
return True
return False
def __hash__(self):
return hash(self._worker_name) ^ \
hash(self._device) ^ \
hash(self._rank)
| pytorch-master | torch/distributed/remote_device.py |
# Keep old package for BC purposes, this file should be removed once
# everything moves to the `torch.distributed._shard` package.
import sys
import torch
import warnings
from torch.distributed._shard.sharding_spec import * # noqa: F403
warnings.warn(
"torch.distributed._sharding_spec will be deprecated, use torch.distributed._shard.sharding_spec instead",
DeprecationWarning
)
sys.modules['torch.distributed._sharding_spec'] = torch.distributed._shard.sharding_spec
| pytorch-master | torch/distributed/_sharding_spec/__init__.py |
| pytorch-master | torch/distributed/pipeline/__init__.py |
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Multithreading in pipeline parallelism."""
from contextlib import contextmanager
from queue import Queue
import sys
from threading import Thread
from types import TracebackType
from typing import TYPE_CHECKING, Callable, Dict, Generator, List, Optional, Tuple, Type, Union, cast
import torch
from .microbatch import Batch
from .stream import AbstractStream, use_device, use_stream
__all__: List[str] = []
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
# Queue is generic only in stubs.
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
if TYPE_CHECKING:
InQueue = Queue[Optional["Task"]]
OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
else:
InQueue = Queue
OutQueue = Queue
class Task:
"""A task represents how to compute a micro-batch on a partition.
It consists of two parts: :meth:`compute` and :meth:`finalize`.
:meth:`compute` should be executed in worker threads concurrently.
    :meth:`finalize` should be executed after the worker threads have finished
    executing :meth:`compute`.
    Running :meth:`compute` in worker threads can improve throughput, because user
    code typically issues several CUDA API calls and, in PyTorch, parallel CUDA API
    calls are not serialized through the GIL. So more than one CUDA API call can be
    in flight at the same time.
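    A minimal sketch of how a task flows through a worker queue (hedged: the CPU
    device and the lambda are illustrative):
    Example::
        >>> # xdoctest: +SKIP("spawns a worker thread")
        >>> from torch.distributed.pipeline.sync.stream import default_stream
        >>> device = torch.device("cpu")
        >>> (in_queues, out_queues) = create_workers([device])
        >>> task = Task(default_stream(device), compute=lambda: Batch(torch.zeros(1)), finalize=None)
        >>> in_queues[0].put(task)
        >>> ok, (task, batch) = out_queues[0].get()
        >>> ok
        True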
"""
def __init__(
self, stream: AbstractStream, *, compute: Callable[[], Batch], finalize: Optional[Callable[[Batch], None]],
) -> None:
self.stream = stream
self._compute = compute
self._finalize = finalize
self._grad_enabled = torch.is_grad_enabled()
def compute(self) -> Batch:
with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled):
return self._compute()
def finalize(self, batch: Batch) -> None:
if self._finalize is None:
return
with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled):
self._finalize(batch)
def worker(in_queue: InQueue, out_queue: OutQueue, device: torch.device) -> None:
"""The main loop of a worker thread."""
with use_device(device):
while True:
task = in_queue.get()
if task is None:
break
try:
batch = task.compute()
except Exception:
exc_info = cast(ExcInfo, sys.exc_info())
out_queue.put((False, exc_info))
continue
out_queue.put((True, (task, batch)))
done = (False, None)
out_queue.put(done)
def create_workers(devices: List[torch.device],) -> Tuple[List[InQueue], List[OutQueue]]:
"""Spawns worker threads. A worker thread is bound to a device."""
in_queues: List[InQueue] = []
out_queues: List[OutQueue] = []
# Spawn workers.
workers: Dict[torch.device, Tuple[InQueue, OutQueue]] = {}
def normalize_device(device: torch.device) -> torch.device:
if device.type == "cuda" and device.index is None:
return torch.device("cuda", index=torch.cuda.current_device())
if device.type == "cpu" and device.index is not None:
return torch.device("cpu")
return device
for device in devices:
device = normalize_device(device)
try:
in_queue, out_queue = workers[device]
except KeyError:
in_queue = Queue()
out_queue = Queue()
workers[device] = (in_queue, out_queue)
t = Thread(target=worker, args=(in_queue, out_queue, device), daemon=True,)
t.start()
in_queues.append(in_queue)
out_queues.append(out_queue)
return (in_queues, out_queues)
@contextmanager
def spawn_workers(devices: List[torch.device],) -> Generator[Tuple[List[InQueue], List[OutQueue]], None, None]:
try:
(in_queues, out_queues) = create_workers(devices)
yield (in_queues, out_queues)
finally:
pass
| pytorch-master | torch/distributed/pipeline/sync/worker.py |
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Provides phony for arbitrary dependency in a autograd graph."""
from typing import Dict, List, Tuple
import torch
from torch import Tensor
from .stream import default_stream, use_stream
__all__: List[str] = []
_phonies: Dict[Tuple[torch.device, bool], Tensor] = {}
def get_phony(device: torch.device, *, requires_grad: bool) -> Tensor:
"""Gets a phony. Phony is tensor without space. It is useful to make
arbitrary dependency in a autograd graph because it doesn't require any
gradient accumulation.
.. note::
Phonies for each device are cached. If an autograd function gets a phony
internally, the phony must be detached to be returned. Otherwise, the
autograd engine will mutate the cached phony in-place::
class Phonify(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
phony = get_phony(input.device, requires_grad=False)
return phony.detach() # detach() is necessary.
"""
key = (device, requires_grad)
try:
phony = _phonies[key]
except KeyError:
with use_stream(default_stream(device)):
phony = torch.empty(0, device=device, requires_grad=requires_grad)
_phonies[key] = phony
return phony
| pytorch-master | torch/distributed/pipeline/sync/phony.py |
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Checkpointing with preceding recomputation.
PyTorch already provides the official checkpointing utilities in
:mod:`torch.utils.checkpoint`. The official checkpointing combines
recomputation and recursive backpropagation into one autograd function named
``CheckpointFunction``. Hence, the recomputation can be started only when the
gradients arrive at the function. In Pipe, the recomputation needs to precede
the gradient arrival to minimize the GPU idle time.
We solve this problem by introducing separate autograd functions named
:class:`Recompute` and :class:`Checkpoint`. Each function represents
recomputation and recursive backpropagation, respectively. With this pair of
functions, we can manipulate the control flow with respect to both the autograd
engine and CUDA.
Specifically, we place CUDA stream synchronization between :class:`Recompute`
and :class:`Checkpoint` to delay only :class:`Checkpoint` until the gradient is
copied entirely.
"""
from collections import deque
from contextlib import contextmanager
import threading
from typing import (
TYPE_CHECKING,
Any,
Deque,
Generator,
List,
Optional,
Union,
Sequence,
Tuple
)
import torch
from torch import Tensor
import torch.autograd
from .dependency import fork, join
from .microbatch import Batch
from .phony import get_phony
__all__ = ["is_checkpointing", "is_recomputing"]
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
# Types for shared memory between Checkpoint and Recompute.
Recomputed = Tuple[TensorOrTensors, Tensors] # (output, input_leaf)
RNGStates = Tuple[Tensor, Optional[Tensor]] # (cpu_rng_state, gpu_rng_state)
if TYPE_CHECKING:
from typing_extensions import Protocol
else:
Protocol = object
# Protocol with __call__ instead of Callable can be used as an attribute type.
# See: https://github.com/python/mypy/issues/708#issuecomment-561735949
class Function(Protocol):
def __call__(self, input: TensorOrTensors) -> TensorOrTensors:
...
def checkpoint(function: Function, input):
"""Makes a checkpoint with a simple interface like
:func:`torch.utils.checkpoint.checkpoint`. It's only used to test or debug
:class:`Checkpoint` and :class:`Recompute` without boilerplate.
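    A minimal usage sketch (hedged: the lambda and tensor are illustrative):
    Example::
        >>> a = torch.ones(1, requires_grad=True)
        >>> output = checkpoint(lambda x: x * 2, a)
        >>> output.backward()
        >>> a.grad
        tensor([2.])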
"""
batch = Batch(input)
chk = Checkpointing(function, batch)
batch = chk.checkpoint()
chk.recompute(batch)
return batch.values
class Checkpointing:
"""Generates a pair of :class:`Checkpoint` and :class:`Recompute`."""
def __init__(self, function: Function, batch: Batch) -> None:
self.function = function
self.batch = batch
# Shared memory between Checkpoint and Recompute. 1-length deque is
# used for mutability and length limitation.
self.recomputed: Deque[Recomputed] = deque(maxlen=1)
self.rng_states: Deque[RNGStates] = deque(maxlen=1)
def checkpoint(self) -> Batch:
"""Returns a batch applied by :class:`Checkpoint`."""
input_atomic = self.batch.atomic
inputs = tuple(self.batch)
# Use a phony which requires grad to ensure that Checkpoint can be
# tracked by the autograd engine even when none of the input tensors
# require grad.
phony = get_phony(self.batch.get_device(), requires_grad=True)
output = Checkpoint.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *inputs)
# Gradients are only supported for float Tensors.
if isinstance(output, tuple):
output = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in output])
return Batch(output)
def recompute(self, batch: Batch) -> None:
"""Applies :class:`Recompute` to the batch in place."""
input_atomic = self.batch.atomic
inputs = tuple(self.batch)
# Use a tensor in the batch to tie together fork-join
tensor_idx = batch.find_tensor_idx()
# batch[tensor_idx] is always requiring grad, because it has been passed
# checkpoint with a phony requiring grad.
batch[tensor_idx], phony = fork(batch[tensor_idx])
phony = Recompute.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *inputs)
batch[tensor_idx] = join(batch[tensor_idx], phony)
class ThreadLocal(threading.local):
def __init__(self) -> None:
self.is_checkpointing = False
self.is_recomputing = False
thread_local = ThreadLocal()
@contextmanager
def enable_checkpointing() -> Generator[None, None, None]:
"""Makes :func:`is_checkpointing` return :data:`True` within a context."""
orig = thread_local.is_checkpointing
thread_local.is_checkpointing = True
try:
yield
finally:
thread_local.is_checkpointing = orig
@contextmanager
def enable_recomputing() -> Generator[None, None, None]:
"""Makes :func:`is_recomputing` return :data:`True` within a context."""
orig = thread_local.is_recomputing
thread_local.is_recomputing = True
try:
yield
finally:
thread_local.is_recomputing = orig
def is_checkpointing() -> bool:
"""Whether the current forward propagation is under checkpointing.
Returns:
bool: :data:`True` if it's under checkpointing.
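    A small usage sketch (the module below is illustrative only)::
        class Logger(nn.Module):
            def forward(self, input):
                if not is_checkpointing() and not is_recomputing():
                    print("plain forward pass")  # skipped inside checkpointed partitions
                return input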
"""
return thread_local.is_checkpointing
def is_recomputing() -> bool:
"""Whether the current forward propagation is under checkpoint
recomputation. Use this to prevent duplicated side-effects at forward
propagation::
class Counter(nn.Module):
def __init__(self):
super().__init__()
self.counter = 0
def forward(self, input):
if not is_recomputing():
self.counter += 1
return input
Returns:
bool: :data:`True` if it's under checkpoint recomputation.
.. seealso:: :ref:`Detecting Recomputation`
"""
return thread_local.is_recomputing
class Context:
"""The common interface between the :class:`Checkpoint` and
:class:`Recompute` context.
"""
recomputed: Deque[Recomputed]
rng_states: Deque[RNGStates]
function: Function
input_atomic: bool
inputs: Sequence[Any]
saved_tensors: Tuple[Tensor, ...]
def save_for_backward(self, *tensors: Tensor) -> None: # pragma: no cover
pass
def save_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> None:
""":meth:`Checkpoint.forward` captures the current PyTorch's random number
generator states at CPU and GPU to reuse in :meth:`Recompute.backward`.
.. seealso:: :ref:`Referential Transparency`
"""
cpu_rng_state = torch.get_rng_state()
gpu_rng_state: Optional[Tensor]
if device.type == "cuda":
gpu_rng_state = torch.cuda.get_rng_state(device)
else:
gpu_rng_state = None
rng_states.append((cpu_rng_state, gpu_rng_state))
@contextmanager
def restore_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> Generator[None, None, None]:
""":meth:`Recompute.backward` restores the random number generator states
captured by :func:`save_rng_states` within its context.
.. seealso:: :ref:`Referential Transparency`
"""
cpu_rng_state, gpu_rng_state = rng_states.pop()
gpu_devices: List[torch.device] = []
if device.type == "cuda":
gpu_devices.append(device)
with torch.random.fork_rng(gpu_devices):
torch.set_rng_state(cpu_rng_state)
if gpu_rng_state is not None:
torch.cuda.set_rng_state(gpu_rng_state, device)
yield
class Checkpoint(torch.autograd.Function):
@staticmethod
# type: ignore[override]
def forward(
ctx: Context,
phony: Tensor,
recomputed: Deque[Recomputed],
rng_states: Deque[RNGStates],
function: Function,
input_atomic: bool,
*inputs,
):
ctx.recomputed = recomputed
ctx.rng_states = rng_states
save_rng_states(phony.device, ctx.rng_states)
ctx.function = function
ctx.input_atomic = input_atomic
if input_atomic:
tensors = [inputs[0]]
else:
tensors = []
for input in inputs:
if torch.is_tensor(input):
tensors.append(input)
ctx.save_for_backward(*tensors)
with torch.no_grad(), enable_checkpointing():
if input_atomic:
assert len(inputs) == 1
output = function(inputs[0])
else:
output = function(*inputs)
return output
@staticmethod
def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]: # pragma: no cover
output, input_leaf = ctx.recomputed.pop()
if isinstance(output, tuple):
outputs = output
else:
outputs = (output,)
if any(torch.is_tensor(y) and y.requires_grad for y in outputs):
tensors = tuple([x for x in outputs if torch.is_tensor(x) and x.requires_grad])
torch.autograd.backward(tensors, grad_output)
grad_input: List[Optional[Tensor]] = [None, None, None, None, None]
grad_input.extend(x.grad if torch.is_tensor(x) else None for x in input_leaf)
return tuple(grad_input)
class Recompute(torch.autograd.Function):
@staticmethod
# type: ignore[override]
def forward(
ctx: Context,
phony: Tensor,
recomputed: Deque[Recomputed],
rng_states: Deque[RNGStates],
function: Function,
input_atomic: bool,
*inputs,
) -> Tensor:
ctx.recomputed = recomputed
ctx.rng_states = rng_states
ctx.function = function
ctx.input_atomic = input_atomic
ctx.inputs = inputs
if input_atomic:
tensors = [inputs[0]]
else:
tensors = []
for input in inputs:
if torch.is_tensor(input):
tensors.append(input)
ctx.save_for_backward(*tensors)
return phony
@staticmethod
def backward(ctx: Context, *grad_output: Tensor) -> Tuple[None, ...]: # pragma: no cover
inputs = ctx.inputs
inputs_leaf = tuple(x.detach().requires_grad_(x.requires_grad) if torch.is_tensor(x) else x for x in inputs)
# Get the device for the inputs from a tensor
device = None
for input in inputs:
if torch.is_tensor(input):
device = input.device
break
if device is None:
raise RuntimeError(f'No tensors found in {inputs}')
with restore_rng_states(device, ctx.rng_states):
with torch.enable_grad(), enable_recomputing():
if ctx.input_atomic:
assert len(inputs_leaf) == 1
output = ctx.function(inputs_leaf[0])
else:
output = ctx.function(*inputs_leaf)
ctx.recomputed.append((output, inputs_leaf))
grad_input: List[None] = [None, None, None, None, None]
grad_input.extend(None for _ in ctx.inputs)
return tuple(grad_input)
| pytorch-master | torch/distributed/pipeline/sync/checkpoint.py |
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Tracks the running statistics per mini-batch instead of micro-batch."""
from typing import TypeVar, cast
import torch
from torch import Tensor, nn
from torch.nn.functional import batch_norm
from torch.nn.modules.batchnorm import _BatchNorm
from .checkpoint import is_recomputing
__all__ = ["DeferredBatchNorm"]
TModule = TypeVar("TModule", bound=nn.Module)
class DeferredBatchNorm(_BatchNorm):
"""A BatchNorm layer tracks multiple micro-batches to update running
statistics per mini-batch.
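    A minimal sketch (hedged: the shapes and ``chunks`` value are illustrative)::
        >>> bn = DeferredBatchNorm(3, chunks=2)
        >>> _ = bn(torch.randn(4, 3, 8, 8))  # first micro-batch: statistics only tracked
        >>> _ = bn(torch.randn(4, 3, 8, 8))  # second micro-batch: running stats committed
        >>> int(bn.num_batches_tracked)
        1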
"""
sum: Tensor
sum_squares: Tensor
running_mean: Tensor
running_var: Tensor
num_batches_tracked: Tensor
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = True,
chunks: int = 1,
) -> None:
super().__init__(num_features, eps, momentum, affine, track_running_stats=True)
self.register_buffer("sum", torch.zeros_like(self.running_mean))
self.register_buffer("sum_squares", torch.zeros_like(self.running_var))
self.counter = 0
self.tracked = 0
self.chunks = chunks
def _check_input_dim(self, input: Tensor) -> None:
# It's the typical _check_input_dim() implementation in PyTorch.
if input.dim() <= 2:
raise ValueError("expected at least 3D input (got %dD input)" % input.dim())
def _track(self, input: Tensor) -> bool:
"""Tracks statistics of a micro-batch."""
# Dimensions except channel. For example, (0, 2, 3) is for BatchNorm2d.
dim = [0]
dim.extend(range(2, input.dim()))
with torch.no_grad():
self.sum += input.sum(dim)
self.sum_squares += (input ** 2).sum(dim)
size = input.size().numel() // input.size(1)
self.counter += size
self.tracked += 1
return self.tracked == self.chunks
def _commit(self) -> None:
"""Updates the running statistics of a mini-batch."""
exponential_average_factor = 0.0
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
mean = self.sum / self.counter
var = self.sum_squares / self.counter - mean ** 2
# Calculate the exponential moving average here.
m = exponential_average_factor
self.running_mean *= 1 - m
self.running_mean += mean * m
self.running_var *= 1 - m
self.running_var += var * m
self.sum.zero_()
self.sum_squares.zero_()
self.counter = 0
self.tracked = 0
def forward(self, input: Tensor) -> Tensor:
if not self.training:
# Don't train parameters on the evaluation mode.
return batch_norm(
input,
running_mean=self.running_mean,
running_var=self.running_var,
weight=self.weight,
bias=self.bias,
training=False,
momentum=0.0,
eps=self.eps,
)
if not is_recomputing():
# Track a micro-batch on the training mode
# but not under a recomputation.
tracked_enough = self._track(input)
# Update the running statistics for a mini-batch
# if it has tracked enough micro-batches.
if tracked_enough:
self._commit()
# Normalize a micro-batch and train the parameters.
return batch_norm(
input,
running_mean=None,
running_var=None,
weight=self.weight,
bias=self.bias,
training=True,
momentum=0.0,
eps=self.eps,
)
@classmethod
def convert_deferred_batch_norm(cls, module: TModule, chunks: int = 1) -> TModule:
"""Converts a :class:`nn.BatchNorm` or underlying
:class:`nn.BatchNorm`s into :class:`DeferredBatchNorm`::
from torchvision.models.resnet import resnet101
from torchpipe.batchnorm import DeferredBatchNorm
model = resnet101()
model = DeferredBatchNorm.convert_deferred_batch_norm(model)
"""
if isinstance(module, DeferredBatchNorm) and module.chunks is chunks:
return cast(TModule, module)
module_output: nn.Module = module
if isinstance(module, _BatchNorm) and module.track_running_stats:
module_output = DeferredBatchNorm(module.num_features, module.eps, module.momentum, module.affine, chunks)
if module.affine:
module_output.register_parameter("weight", module.weight)
module_output.register_parameter("bias", module.bias)
module_output.register_buffer("running_mean", module.running_mean)
module_output.register_buffer("running_var", module.running_var)
module_output.register_buffer("num_batches_tracked", module.num_batches_tracked)
for name, child in module.named_children():
module_output.add_module(name, cls.convert_deferred_batch_norm(child, chunks))
return cast(TModule, module_output)
| pytorch-master | torch/distributed/pipeline/sync/batchnorm.py |
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""A Pipe implementation in PyTorch."""
from .checkpoint import is_checkpointing, is_recomputing
from .pipe import Pipe, WithDevice
from .microbatch import NoChunk
__all__ = ["Pipe", "is_checkpointing", "is_recomputing"]
| pytorch-master | torch/distributed/pipeline/sync/__init__.py |
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Autograd functions for stream-aware CUDA copy. It is used to overlap copy
and computation on the same GPU.
"""
from collections import deque
from typing import Deque, List, Optional, Tuple, Sequence
import torch
from torch import Tensor
from .stream import AbstractStream, current_stream, get_device, record_stream, use_stream, wait_stream
__all__: List[str] = []
Tensors = Sequence[Tensor]
# Common interface between :class:`Copy` and :class:`Wait`.
class Context:
prev_stream: AbstractStream
next_stream: AbstractStream
class Copy(torch.autograd.Function):
"""Copies tensors on specific streams."""
@staticmethod
# type: ignore[override]
def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input,) -> Tensors:
ctx.prev_stream = prev_stream
ctx.next_stream = next_stream
output = []
output_stream = current_stream(get_device(next_stream))
with use_stream(prev_stream), use_stream(next_stream):
for x in input:
if torch.is_tensor(x):
y = x.to(get_device(next_stream), non_blocking=True)
output.append(y)
# 'prev_stream' is not where 'x' has been allocated.
record_stream(x, prev_stream)
# 'y' has been allocated on 'next_stream'.
# It might be used on the current stream captured as 'output_stream'.
record_stream(y, output_stream)
else:
output.append(x)
return tuple(output)
@staticmethod
def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]:
prev_stream = ctx.prev_stream
next_stream = ctx.next_stream
grad_input: Deque[Tensor] = deque(maxlen=len(grad_output))
input_stream = current_stream(get_device(prev_stream))
with use_stream(prev_stream), use_stream(next_stream):
for x in reversed(grad_output):
y = x.to(get_device(prev_stream), non_blocking=True)
grad_input.appendleft(y)
# 'next_stream' is not where 'x' has been allocated.
record_stream(x, next_stream)
# 'y' has been allocated on 'prev_stream'.
# It might be used on the current stream captured as 'input_stream'.
record_stream(y, input_stream)
grad_streams: Tuple[Optional[Tensor], ...] = (None, None)
return grad_streams + tuple(grad_input)
class Wait(torch.autograd.Function):
"""Synchronizes a stream to another stream.
Place it just before you want to start an operation on the next stream,
provided that all operations on the previous stream are done.
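    A minimal sketch (hedged: the copy stream and tensor are illustrative, and a
    CUDA device is assumed)::
        >>> # xdoctest: +SKIP("requires CUDA")
        >>> copy_stream = torch.cuda.Stream()
        >>> compute_stream = torch.cuda.current_stream()
        >>> x = torch.ones(1, device="cuda")
        >>> (x,) = Wait.apply(copy_stream, compute_stream, x)
        >>> # Kernels launched on compute_stream now run only after copy_stream finishes.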
"""
@staticmethod
# type: ignore[override]
def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input) -> Tensors:
ctx.prev_stream = prev_stream
ctx.next_stream = next_stream
wait_stream(next_stream, prev_stream)
return tuple(x.detach() if torch.is_tensor(x) else x for x in input)
@staticmethod
def backward(ctx: Context, *grad_input: Tensor,) -> Tuple[Optional[Tensor], ...]:
prev_stream = ctx.prev_stream
next_stream = ctx.next_stream
wait_stream(prev_stream, next_stream)
grad_streams: Tuple[Optional[Tensor], ...] = (None, None)
return grad_streams + grad_input
| pytorch-master | torch/distributed/pipeline/sync/copy.py |