# flake8: noqa: F401
"""The implementations should be moved here as soon as their deprecation period is over."""
from torch.testing._legacy import (
_validate_dtypes,
_dispatch_dtypes,
all_types,
all_types_and,
all_types_and_complex,
all_types_and_complex_and,
all_types_and_half,
complex_types,
complex_types_and,
empty_types,
floating_and_complex_types,
floating_and_complex_types_and,
floating_types,
floating_types_and,
double_types,
floating_types_and_half,
get_all_complex_dtypes,
get_all_dtypes,
get_all_fp_dtypes,
get_all_int_dtypes,
get_all_math_dtypes,
integral_types,
integral_types_and,
)
# --- end of torch/testing/_internal/common_dtype.py (pytorch-master); a second file follows ---
from abc import abstractmethod
import math
import tempfile
import unittest
from copy import deepcopy
from functools import reduce, partial, wraps
from itertools import product
from operator import mul
from math import pi
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import _reduction as _Reduction
from torch.testing._internal.common_utils import TestCase, to_gpu, freeze_rng_state, is_iterable, \
TEST_WITH_ROCM, gradcheck, gradgradcheck
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.autograd.gradcheck import _get_numerical_jacobian, _iter_tensors
from torch.autograd import Variable
from torch.types import _TensorOrTensors
import torch.backends.cudnn
from typing import Dict, Callable, Tuple, List, Sequence, Union, Any
TemporaryFile = tempfile.TemporaryFile
PRECISION = 1e-5
def get_reduction(m):
result = getattr(m, 'reduction', None)
if result is None:
result = _Reduction.legacy_get_string(getattr(m, 'sizeAverage', None), True, emit_warning=False)
assert result is not None
return result
def get_weight(m):
result = getattr(m, 'weight', None)
if result is not None:
return result
return getattr(m, 'weights', None)
# NOTE [How to check NN module / functional API parity between Python and C++ frontends]
#
# The way to check API parity is to add parity tests for the NN module / functional of interest.
# Here are the detailed steps:
#
# For NN module:
# 1. Make sure you already have a test dict with the module configuration you want to test.
# 2. Add a `cpp_constructor_args` entry to the test dict, with its value exactly matching
# the Python module constructor arguments. For example, if in the test dict we pass
# `(10, 8)` to `torch.nn.Linear` constructor, then we should pass `torch::nn::LinearOptions(10, 8)`
# as the corresponding C++ constructor argument to `torch::nn::Linear`.
# 3. If in the process of performing the above step you referenced any variables
# in the `cpp_constructor_args` entry, you must add a `cpp_var_map` entry
# to the test dict to make sure that those variables are populated with the right Python values.
# For example, if the Python constructor call is
# `torch.nn.FractionalMaxPool2d(2, output_ratio=0.5, _random_samples=random_samples)`,
# the corresponding C++ constructor argument is
# `torch::nn::FractionalMaxPool2dOptions(2).output_ratio(0.5)._random_samples(random_samples)`,
# and the `cpp_var_map` entry must be
# `{'random_samples': random_samples}` in order to populate the C++ variable `random_samples`
# used in the C++ constructor argument with the Python tensor value `random_samples`.
#
# For NN functional:
# 1. Make sure you already have a test dict with the functional configuration you want to test.
# 2. If the test dict's `constructor` entry looks like `wrap_functional(F.some_functional_name, ...)`,
# then you must add a `cpp_options_args` entry to the test dict, with its value exactly matching the Python
# functional's optional arguments. For example, if the test dict's `constructor` entry is
# `wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest')`,
# then the `cpp_options_args` entry should be
# "F::InterpolateFuncOptions().size(std::vector<int64_t>({12})).scale_factor(c10::nullopt).mode(torch::kNearest)".
# 3. Otherwise, if the test dict's `constructor` entry looks like
# `wrap_functional(lambda i: F.some_functional_name(...))`,
# then you must add a `cpp_function_call` entry to the test dict, with its value exactly matching the Python
# functional call. For example, if the test dict's `constructor` entry is
# `wrap_functional(lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none'))`,
# then the `cpp_function_call` entry should be
# "F::poisson_nll_loss(i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))".
# 4. If in the process of performing the above two steps you referenced any variables
# in the `cpp_options_args` or `cpp_function_call` entry, you must
# add a `cpp_var_map` entry to the test dict to make sure that those variables
# are populated with the right Python values. Take the `F.poisson_nll_loss` test dict
# from step 3 above as an example.
# Notice that there are two variables `i` and `t` that need to have their values provided,
# and the way to do so is to add a `cpp_var_map` entry: `cpp_var_map={'i': '_get_input()', 't': t}`.
# (Note that for `i`, since we want it to take the Python input value, we pass the string '_get_input()' as its value,
# and the C++ parity test mechanism will populate `i` with the Python input value correctly.)
#
# There are also a few optional flags in the test dict to control the C++ parity test behavior:
#
# - `test_cpp_api_parity`: if `False`, skips the C++ parity test for this test dict. Default: True.
# - `has_parity`: if `False`, expects this test dict to fail the C++ parity test. Default: True.
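
# A minimal, hedged sketch of a test dict following the steps above, using the
# `FractionalMaxPool2d` example from the NOTE. It is illustrative only: the helper
# name `_example_parity_test_dict` is not part of the upstream suite, and the dict
# it returns is never registered with `module_tests` / `new_module_tests` below.
def _example_parity_test_dict():
    random_samples = torch.empty((1, 3, 2), dtype=torch.double).uniform_()
    return dict(
        fullname='FractionalMaxPool2d_parity_example',
        constructor=lambda: nn.FractionalMaxPool2d(
            2, output_ratio=0.5, _random_samples=random_samples),
        # Step 2: the C++ options mirror the Python constructor arguments.
        cpp_constructor_args='''torch::nn::FractionalMaxPool2dOptions(2)
            .output_ratio(0.5)
            ._random_samples(random_samples)''',
        # Step 3: `random_samples` is referenced in the C++ options above, so it
        # must be supplied to the C++ side through `cpp_var_map`.
        cpp_var_map={'random_samples': random_samples},
        input_size=(1, 3, 5, 7),
    )
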
module_tests = [
dict(
module_name='Linear',
constructor_args=(10, 8),
cpp_constructor_args='torch::nn::LinearOptions(10, 8)',
input_size=(4, 10),
reference_fn=lambda i, p, _: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8),
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Linear',
constructor_args=(10, 8, False),
cpp_constructor_args='torch::nn::LinearOptions(10, 8).bias(false)',
input_size=(4, 10),
desc='no_bias',
reference_fn=lambda i, p, _: torch.mm(i, p[0].t()),
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Threshold',
constructor_args=(2., 1.),
cpp_constructor_args='torch::nn::ThresholdOptions(2., 1.)',
input_size=(2, 3, 4, 5),
check_inplace=True,
desc='threshold_value'
),
dict(
module_name='Threshold',
constructor_args=(2., 10.),
cpp_constructor_args='torch::nn::ThresholdOptions(2., 10.)',
input_size=(2, 3, 4, 5),
desc='large_value'
),
dict(
module_name='ReLU',
input_size=(2, 3, 4, 5),
check_inplace=True,
),
dict(
module_name='ReLU6',
input_size=(2, 3, 4, 5),
check_inplace=True,
),
dict(
module_name='RReLU',
input_size=(1, 2, 2),
test_cuda=False,
),
dict(
module_name='RReLU',
constructor_args=(0.1, 0.9),
cpp_constructor_args='torch::nn::RReLUOptions().lower(0.1).upper(0.9)',
input_size=(4, 4, 5),
desc='with_up_down',
test_cuda=False,
),
dict(
module_name='Hardtanh',
input_size=(3, 2, 5),
reference_fn=lambda i, *_: i.clamp(-1, 1),
),
dict(
module_name='Sigmoid',
input_size=(2, 3, 4, 5),
),
dict(
module_name='Tanh',
input_size=(2, 3, 4, 5),
),
dict(
module_name='Flatten',
input_size=(2, 3, 4, 5),
reference_fn=lambda i, *_: torch.flatten(i, 1)
),
dict(
module_name='Softmax',
constructor_args=(1,),
cpp_constructor_args='torch::nn::SoftmaxOptions(1)',
input_size=(10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20)),
),
dict(
module_name='Softmax2d',
input_size=(1, 3, 10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(1, False)),
),
dict(
module_name='LogSoftmax',
constructor_args=(1,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(1)',
input_size=(10, 20),
reference_fn=lambda i, *_: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_(),
),
dict(
module_name='LogSoftmax',
constructor_args=(1,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(1)',
input_size=(1, 3, 10, 20),
reference_fn=lambda i, *_: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(),
desc='multiparam',
),
dict(
module_name='ELU',
constructor_args=(2.,),
cpp_constructor_args='torch::nn::ELUOptions().alpha(2.)',
input_size=(3, 2, 5),
reference_fn=lambda x, *_: torch.where(x >= 0, x, 2 * (x.exp() - 1)),
),
# TODO: reference function
dict(
module_name='Hardshrink',
constructor_args=(2.,),
cpp_constructor_args='torch::nn::HardshrinkOptions(2.)',
input_size=(4, 3, 2, 4),
),
dict(
module_name='LeakyReLU',
input_size=(3, 2, 5),
check_inplace=True
),
dict(
module_name='LeakyReLU',
constructor_args=(0.5,),
cpp_constructor_args='torch::nn::LeakyReLUOptions().negative_slope(0.5)',
input_size=(3, 2, 5),
check_inplace=True,
desc='with_negval'
),
dict(
module_name='LeakyReLU',
constructor_args=(0.0,),
cpp_constructor_args='torch::nn::LeakyReLUOptions().negative_slope(0.0)',
input_fn=lambda: torch.randn(10, 10),
check_inplace=True,
desc='with_zero_negval'
),
dict(
module_name='LogSigmoid',
input_size=(2, 3, 4),
reference_fn=lambda i, *_: i.sigmoid().log(),
),
dict(
module_name='Softplus',
input_size=(10, 20),
reference_fn=lambda i, *_: torch.log(1 + torch.exp(i)),
),
dict(
module_name='Softplus',
constructor_args=(2,),
cpp_constructor_args='torch::nn::SoftplusOptions().beta(2)',
input_size=(10, 20),
reference_fn=lambda i, *_: 1. / 2. * torch.log(1 + torch.exp(2 * i)),
desc='beta',
),
dict(
module_name='Softplus',
constructor_args=(2, -100),
cpp_constructor_args='torch::nn::SoftplusOptions().beta(2).threshold(-100)',
input_size=(10, 20),
reference_fn=(
lambda i, *_: ((i * 2) > -100).type_as(i) * i
+ ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))
),
desc='beta_threshold',
),
dict(
module_name='Softshrink',
input_size=(3, 2, 5),
),
dict(
module_name='Softshrink',
constructor_args=(1,),
cpp_constructor_args='torch::nn::SoftshrinkOptions(1)',
input_size=(3, 2, 5),
desc='lambda',
),
dict(
module_name='CrossMapLRN2d',
constructor_args=(5, 5e-3, 1e-3, 2),
cpp_constructor_args='torch::nn::CrossMapLRN2dOptions(5).alpha(5e-3).beta(1e-3).k(2)',
input_size=(2, 3, 6, 6),
check_gradgrad=False,
# TODO(#50743): Figure out the error. "RuntimeError: Unrecognized tensor type ID: Batched"
check_batched_grad=False,
),
dict(
module_name='PReLU',
input_size=(2, 3, 4),
reference_fn=lambda i, p, _: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
desc='1d',
),
dict(
module_name='PReLU',
constructor_args=(3,),
cpp_constructor_args='torch::nn::PReLUOptions().num_parameters(3)',
input_size=(2, 3, 4),
desc='1d_multiparam',
reference_fn=lambda i, p, _: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
),
dict(
module_name='PReLU',
input_size=(2, 3, 4, 5),
desc='2d',
reference_fn=lambda i, p, _: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
),
dict(
module_name='PReLU',
constructor_args=(3,),
cpp_constructor_args='torch::nn::PReLUOptions().num_parameters(3)',
input_size=(2, 3, 4, 5),
desc='2d_multiparam',
reference_fn=lambda i, p, _: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
),
dict(
module_name='PReLU',
input_size=(2, 3, 4, 5, 6),
reference_fn=lambda i, p, _: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
desc='3d',
),
dict(
module_name='PReLU',
constructor_args=(3,),
cpp_constructor_args='torch::nn::PReLUOptions().num_parameters(3)',
input_size=(2, 3, 4, 5, 6),
desc='3d_multiparam',
reference_fn=lambda i, p, _: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
),
dict(
module_name='Softsign',
input_size=(3, 2, 5),
reference_fn=lambda i, *_: i.div(1 + torch.abs(i)),
),
dict(
module_name='Softmin',
constructor_args=(1,),
cpp_constructor_args='torch::nn::SoftminOptions(1)',
input_size=(10, 20),
),
dict(
module_name='Softmin',
constructor_args=(1,),
cpp_constructor_args='torch::nn::SoftminOptions(1)',
input_size=(2, 3, 5, 10),
desc='multidim',
),
dict(
module_name='Tanhshrink',
input_size=(2, 3, 4, 5),
),
]
# Generates a random tensor with non-equal values. This ensures that duplicate
# values won't cause test failures for modules like MaxPooling.
# `size` should be small; otherwise randperm fails / long overflows.
def _rand_tensor_non_equal(*size):
total = reduce(mul, size, 1)
return torch.randperm(total).view(*size).double()
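
# Hedged usage sketch (illustrative only, not used by the suite): every value in
# the tensor returned by `_rand_tensor_non_equal` is distinct, which is what makes
# it safe for modules such as MaxPooling, where ties would make results ambiguous.
def _rand_tensor_non_equal_example():
    t = _rand_tensor_non_equal(2, 3)
    assert t.unique().numel() == t.numel()  # all 6 values are distinct
    return t
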
def wrap_functional(fn, **kwargs):
class FunctionalModule(nn.Module):
def forward(self, *args):
return fn(*args, **kwargs)
return FunctionalModule
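
# Hedged usage sketch (illustrative only, not used by the test entries below):
# `wrap_functional` returns a Module *class* whose forward simply applies `fn`
# with the captured keyword arguments.
def _wrap_functional_usage_example():
    x = torch.randn(2, 3)
    relu_module_cls = wrap_functional(F.relu)  # a class, not an instance
    out = relu_module_cls()(x)                 # equivalent to F.relu(x)
    assert torch.equal(out, F.relu(x))
    return out
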
def poissonnllloss_no_reduce_test():
t = torch.randn(10, 10)
return dict(
fullname='PoissonNLLLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none')),
cpp_function_call='F::poisson_nll_loss('
'i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))',
input_fn=lambda: torch.rand(10, 10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_: i.exp() - t.mul(i),
pickle=False)
def bceloss_no_reduce_test():
t = Variable(torch.randn(15, 10).gt(0).double())
return dict(
fullname='BCELoss_no_reduce',
constructor=wrap_functional(
lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')),
cpp_function_call='F::binary_cross_entropy('
'i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))',
input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()),
pickle=False,
precision=7e-4)
def bceloss_no_reduce_scalar_test():
t = torch.randn(()).gt(0).double()
return dict(
fullname='BCELoss_no_reduce_scalar',
constructor=wrap_functional(
lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')),
cpp_function_call='F::binary_cross_entropy('
'i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))',
input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()),
pickle=False)
def bceloss_weights_no_reduce_test():
t = Variable(torch.randn(15, 10).gt(0).double())
weights = torch.rand(10)
return dict(
fullname='BCELoss_weights_no_reduce',
constructor=wrap_functional(
lambda i: F.binary_cross_entropy(i, t.type_as(i),
weight=weights.type_as(i), reduction='none')),
cpp_function_call='F::binary_cross_entropy('
'i, t.to(i.options()), '
'F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))',
input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights},
reference_fn=lambda i, p, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,
pickle=False,
precision=3e-4
)
def bceloss_weights_no_reduce_scalar_test():
t = torch.randn(()).double()
weights = torch.rand(())
return dict(
fullname='BCELoss_weights_no_reduce_scalar',
constructor=wrap_functional(
lambda i: F.binary_cross_entropy(i, t.type_as(i),
weight=weights.type_as(i), reduction='none')),
cpp_function_call='''F::binary_cross_entropy(
i, t.to(i.options()),
F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''',
cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights},
input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),
reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,
pickle=False
)
def bce_with_logistic_legacy_enum_test():
t = Variable(torch.randn(15, 10).gt(0).double())
sigmoid = nn.Sigmoid()
return dict(
fullname='BCEWithLogitsLoss_legacy_enum',
constructor=wrap_functional(
lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduce=False)),
cpp_function_call='''F::binary_cross_entropy_with_logits(
i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()),
check_gradgrad=False,
pickle=False,
)
def bce_with_logistic_no_reduce_test():
t = Variable(torch.randn(15, 10).gt(0).double())
sigmoid = nn.Sigmoid()
return dict(
fullname='BCEWithLogitsLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')),
cpp_function_call='''F::binary_cross_entropy_with_logits(
i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()),
check_gradgrad=False,
pickle=False,
)
def bce_with_logistic_no_reduce_scalar_test():
t = torch.randn(()).gt(0).double()
sigmoid = nn.Sigmoid()
return dict(
fullname='BCEWithLogitsLoss_no_reduce_scalar',
constructor=wrap_functional(
lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')),
cpp_function_call='''F::binary_cross_entropy_with_logits(
i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()),
check_gradgrad=False,
pickle=False
)
def kldivloss_with_target_no_reduce_test():
t = torch.rand(10, 10)
return dict(
fullname='KLDivLoss_with_target_no_reduce',
constructor=wrap_functional(
lambda i: F.kl_div(i, t.type_as(i), reduction='none')),
cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))',
input_fn=lambda: torch.rand(10, 10).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'),
supports_forward_ad=True,
pickle=False)
def kldivloss_no_reduce_test():
t = torch.rand(10, 10)
return dict(
fullname='KLDivLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.kl_div(i, t.type_as(i), reduction='none')),
cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))',
input_fn=lambda: torch.rand(10, 10).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'),
supports_forward_ad=True,
pickle=False,
)
def kldivloss_no_reduce_scalar_test():
t = torch.rand(())
return dict(
fullname='KLDivLoss_no_reduce_scalar',
constructor=wrap_functional(
lambda i: F.kl_div(i, t.type_as(i), reduction='none')),
cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))',
input_fn=lambda: torch.rand(()).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'),
supports_forward_ad=True,
pickle=False)
def kldivloss_with_log_target_no_reduce_test():
t = torch.rand(10, 10).log()
return dict(
fullname='KLDivLoss_with_log_target_no_reduce',
constructor=wrap_functional(
lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)),
cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))',
input_fn=lambda: torch.rand(10, 10).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'),
supports_forward_ad=True,
pickle=False)
def kldivloss_no_reduce_log_target_test():
t = torch.rand(10, 10).log()
return dict(
fullname='KLDivLoss_no_reduce_log_target',
constructor=wrap_functional(
lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)),
cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))',
input_fn=lambda: torch.rand(10, 10).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'),
supports_forward_ad=True,
pickle=False,
)
def kldivloss_no_reduce_scalar_log_target_test():
t = torch.rand(()).log()
return dict(
fullname='KLDivLoss_no_reduce_scalar_log_target',
constructor=wrap_functional(
lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)),
cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))',
input_fn=lambda: torch.rand(()).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'),
supports_forward_ad=True,
pickle=False)
def l1loss_no_reduce_test():
t = torch.randn(2, 3, 4)
return dict(
fullname='L1Loss_no_reduce',
constructor=wrap_functional(
lambda i: F.l1_loss(i, t.type_as(i), reduction='none')),
cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))',
input_fn=lambda: torch.randn(2, 3, 4),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_: (i - t.type_as(i)).abs(),
supports_forward_ad=True,
pickle=False)
def l1loss_no_reduce_complex_test():
t = torch.randn(2, 3, 4, dtype=torch.cdouble)
return dict(
fullname='L1Loss_no_reduce_complex',
constructor=wrap_functional(
lambda i: F.l1_loss(i, t.type_as(i), reduction='none')),
cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))',
input_fn=lambda: torch.randn(2, 3, 4, dtype=torch.cdouble),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_: (i - t.type_as(i)).abs(),
supports_forward_ad=True,
pickle=False)
def l1loss_no_reduce_scalar_test():
t = torch.randn(())
return dict(
fullname='L1Loss_no_reduce_scalar',
constructor=wrap_functional(
lambda i: F.l1_loss(i, t.type_as(i), reduction='none')),
cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))',
input_fn=lambda: torch.randn(()),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_: (i - t.type_as(i)).abs(),
supports_forward_ad=True,
pickle=False)
def mseloss_no_reduce_test():
input_size = (2, 3, 4, 5)
target = torch.randn(*input_size)
return dict(
fullname='MSELoss_no_reduce',
constructor=wrap_functional(
lambda i: F.mse_loss(i, target.type_as(i), reduction='none')),
cpp_function_call='F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))',
input_size=input_size,
cpp_var_map={'i': '_get_input()', 'target': target},
reference_fn=lambda i, *_: (i - target).pow(2),
supports_forward_ad=True,
pickle=False)
def mseloss_no_reduce_scalar_test():
input_size = ()
target = torch.randn(input_size)
return dict(
fullname='MSELoss_no_reduce_scalar',
constructor=wrap_functional(
lambda i: F.mse_loss(i, target.type_as(i), reduction='none')),
cpp_function_call='F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))',
input_size=input_size,
cpp_var_map={'i': '_get_input()', 'target': target},
reference_fn=lambda i, *_: (i - target).pow(2),
supports_forward_ad=True,
pickle=False)
def nllloss_no_reduce_test():
t = Variable(torch.empty(15).uniform_().mul(10).floor().long())
kwargs = {'reduction': 'none'}
return dict(
fullname='NLLLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.rand(15, 10).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs),
pickle=False)
def nllloss_no_reduce_ignore_index_test():
t = Variable(torch.empty(15).uniform_().mul(10).floor().long())
kwargs: Dict[str, Union[int, str]] = {'ignore_index': 2, 'reduction': 'none'}
return dict(
fullname='NLLLoss_no_reduce_ignore_index',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']),
reduction=str(kwargs['reduction']))),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(2).reduction(torch::kNone))''',
input_fn=lambda: torch.rand(15, 10).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs),
pickle=False)
def nllloss_no_reduce_weights_test():
t = Variable(torch.empty(15).uniform_().mul(10).floor().long())
weight = torch.rand(10)
def kwargs(i):
return {'weight': weight.type_as(i), 'reduction': 'none'}
return dict(
fullname='NLLLoss_no_reduce_weights',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong),
F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''',
input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)),
pickle=False)
def nllloss_no_reduce_weights_ignore_index_test():
t = Variable(torch.empty(15).uniform_().mul(10).floor().long())
weight = torch.rand(10)
def kwargs(i):
return {'weight': weight.type_as(i), 'reduction': 'none',
'ignore_index': 2}
return dict(
fullname='NLLLoss_no_reduce_weights_ignore_index',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i.data))),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong),
F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(2))''',
input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)),
pickle=False)
def nllloss_no_reduce_weights_ignore_index_neg_test():
t = Variable(torch.empty(15).uniform_().mul(10).floor().long())
weight = torch.rand(10)
def kwargs(i):
return {'weight': weight.type_as(i), 'reduction': 'none',
'ignore_index': -1}
return dict(
fullname='NLLLoss_no_reduce_weights_ignore_index_neg',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong),
F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(-1))''',
input=torch.rand(15, 10).add(1e-2).log(),
cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)),
pickle=False)
def nllloss2d_no_reduce_test():
t = Variable(torch.rand(2, 5, 5).mul(3).floor().long())
kwargs = {'reduction': 'none'}
return dict(
fullname='NLLLoss2d_no_reduce',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.rand(2, 3, 5, 5).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),
pickle=False)
def nllloss2d_no_reduce_ignore_index_test():
t = Variable(torch.rand(2, 5, 5).mul(3).floor().long())
kwargs: Dict[str, Union[int, str]] = {'ignore_index': 1, 'reduction': 'none'}
return dict(
fullname='NLLLoss2d_no_reduce_ignore_index',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']),
reduction=str(kwargs['reduction']))),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))''',
input_fn=lambda: torch.rand(2, 3, 5, 5).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),
pickle=False)
def nllloss2d_no_reduce_weights_test():
t = Variable(torch.rand(2, 5, 5).mul(3).floor().long())
weight = torch.rand(3)
def kwargs(i):
return {'weight': weight.type_as(i), 'reduction': 'none'}
return dict(
fullname='NLLLoss2d_no_reduce_weights',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong),
F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''',
input_fn=lambda: torch.rand(2, 3, 5, 5).log(),
cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)),
pickle=False)
def nlllossNd_no_reduce_test():
t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())
kwargs = {'reduction': 'none'}
return dict(
fullname='NLLLossNd_no_reduce',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),
pickle=False)
def nlllossNd_no_reduce_ignore_index_test():
t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())
kwargs: Dict[str, Union[int, str]] = {'ignore_index': 1, 'reduction': 'none'}
return dict(
fullname='NLLLossNd_no_reduce_ignore_index',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']),
reduction=str(kwargs['reduction']))),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))''',
input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),
pickle=False)
def nlllossNd_no_reduce_weights_test():
t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())
weight = torch.rand(3)
def kwargs(i):
return {'weight': weight.type_as(i), 'reduction': 'none'}
return dict(
fullname='NLLLossNd_no_reduce_weights',
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),
cpp_function_call='''F::nll_loss(
i, t.to(i.options()).to(torch::kLong),
F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''',
input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),
cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight},
reference_fn=lambda i, *_:
loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)),
pickle=False)
def smoothl1loss_no_reduce_test():
t = torch.randn(2, 3, 4)
return dict(
fullname='SmoothL1Loss_no_reduce',
constructor=wrap_functional(
lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')),
cpp_function_call='''F::smooth_l1_loss(
i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(2, 3, 4),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'),
supports_forward_ad=True,
pickle=False)
def smoothl1loss_no_reduce_scalar_test():
t = torch.randn(())
return dict(
fullname='SmoothL1Loss_no_reduce_scalar',
constructor=wrap_functional(
lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')),
cpp_function_call='''F::smooth_l1_loss(
i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(()),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'),
supports_forward_ad=True,
pickle=False)
def smoothl1loss_beta_test():
t = torch.randn(2, 3, 4)
return dict(
fullname='SmoothL1Loss_beta',
constructor=wrap_functional(
lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none', beta=0.5)),
cpp_function_call='''F::smooth_l1_loss(
i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone), 0.5)''',
input_fn=lambda: torch.randn(2, 3, 4),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none', beta=0.5),
supports_forward_ad=True,
pickle=False)
def smoothl1loss_zero_beta_test():
t = torch.randn(2, 3, 4)
return dict(
fullname='SmoothL1Loss_zero_beta',
constructor=wrap_functional(
lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none', beta=0)),
cpp_function_call='''F::smooth_l1_loss(
i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone), 0)''',
input_fn=lambda: torch.randn(2, 3, 4),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none', beta=0),
supports_forward_ad=True,
pickle=False)
def huberloss_delta_test():
t = torch.randn(2, 3, 4)
return dict(
fullname='HuberLoss_delta',
constructor=wrap_functional(
lambda i: F.huber_loss(i, t.type_as(i), reduction='none', delta=0.5)),
cpp_function_call='''F::huber_loss(
i, t.to(i.options()), F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5))''',
input_fn=lambda: torch.randn(2, 3, 4),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['HuberLoss'](i, t.type_as(i), reduction='none', delta=0.5),
supports_forward_ad=True,
pickle=False)
def multilabelmarginloss_0d_no_reduce_test():
t = torch.zeros(()).long()
return dict(
fullname='MultiLabelMarginLoss_0d_no_reduce',
constructor=wrap_functional(
lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),
cpp_function_call='''F::multilabel_margin_loss(
i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(()),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multilabelmarginloss_1d_no_reduce_test():
t = Variable(torch.rand(10).mul(10).floor().long())
return dict(
fullname='MultiLabelMarginLoss_1d_no_reduce',
constructor=wrap_functional(
lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),
cpp_function_call='''F::multilabel_margin_loss(
i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multilabelmarginloss_index_neg_test():
t = Variable(torch.clamp(torch.rand(5, 10).add(-.5).mul(20).floor().long(), min=-1))
return dict(
fullname='MultiLabelMarginLoss_index_neg',
constructor=wrap_functional(
lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),
cpp_function_call='''F::multilabel_margin_loss(
i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(5, 10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multilabelmarginloss_no_reduce_test():
t = Variable(torch.rand(5, 10).mul(10).floor().long())
return dict(
fullname='MultiLabelMarginLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),
cpp_function_call='''F::multilabel_margin_loss(
i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(5, 10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def hingeembeddingloss_no_reduce_test():
t = Variable(torch.randn(10).gt(0).double().mul_(2).sub(1))
return dict(
fullname='HingeEmbeddingLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.hinge_embedding_loss(i, t.type_as(i), reduction='none')),
cpp_function_call='''F::hinge_embedding_loss(
i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), reduction='none'),
check_sum_reduction=True,
pickle=False)
def hingeembeddingloss_margin_no_reduce_test():
t = Variable(torch.randn(10).gt(0).double().mul_(2).sub(1))
return dict(
fullname='HingeEmbeddingLoss_margin_no_reduce',
constructor=wrap_functional(
lambda i: F.hinge_embedding_loss(i, t.type_as(i), margin=0.5, reduction='none')),
cpp_function_call='''F::hinge_embedding_loss(
i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().margin(0.5).reduction(torch::kNone))''',
input_fn=lambda: torch.randn(10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), margin=0.5, reduction='none'),
check_sum_reduction=True,
pickle=False)
def softmarginloss_no_reduce_test():
t = torch.randn(5, 5)
return dict(
fullname='SoftMarginLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.soft_margin_loss(i, t.type_as(i), reduction='none')),
cpp_function_call='''F::soft_margin_loss(
i, t.to(i.options()), F::SoftMarginLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(5, 5),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['SoftMarginLoss'](i, t.type_as(i), reduction='none'),
supports_forward_ad=True,
pickle=False)
def multilabelsoftmarginloss_no_reduce_test():
t = torch.rand(5, 10).mul(2).floor()
return dict(
fullname='MultiLabelSoftMarginLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i), reduction='none')),
cpp_function_call='''F::multilabel_soft_margin_loss(
i, t.to(i.options()), F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(5, 10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
(-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log())).sum(dim=1) / i.size(1),
check_gradgrad=False,
pickle=False)
def multilabelsoftmarginloss_weights_no_reduce_test():
t = torch.rand(5, 10).mul(2).floor()
weights = torch.rand(10)
return dict(
fullname='MultiLabelSoftMarginLoss_weights_no_reduce',
constructor=wrap_functional(
lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i),
weight=weights.type_as(i), reduction='none')),
cpp_function_call='''F::multilabel_soft_margin_loss(
i, t.to(i.options()),
F::MultilabelSoftMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''',
input_fn=lambda: torch.randn(5, 10),
cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights},
reference_fn=lambda i, *_:
(-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * weights).sum(dim=1) / i.size(1),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multimarginloss_no_reduce_test():
t = torch.rand(5).mul(8).floor().long()
return dict(
fullname='MultiMarginLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')),
cpp_function_call='''F::multi_margin_loss(
i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(5, 10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multimarginloss_1d_no_reduce_test():
t = torch.rand(1).mul(8).floor().long()
return dict(
fullname='MultiMarginLoss_1d_no_reduce',
constructor=wrap_functional(
lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')),
cpp_function_call='''F::multi_margin_loss(
i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multimarginloss_1d_input_0d_target_no_reduce_test():
t = torch.rand(()).mul(8).floor().long()
return dict(
fullname='multimarginloss_1d_input_0d_target_no_reduce',
constructor=wrap_functional(
lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')),
cpp_function_call='''F::multi_margin_loss(
i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''',
input_fn=lambda: torch.randn(10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multimarginloss_p_no_reduce_test():
t = torch.rand(5).mul(8).floor().long()
return dict(
fullname='MultiMarginLoss_p_no_reduce',
constructor=wrap_functional(
lambda i: F.multi_margin_loss(i, t.type_as(i).long(), p=2, reduction='none')),
cpp_function_call='''F::multi_margin_loss(
i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().p(2).reduction(torch::kNone))''',
input_fn=lambda: torch.randn(5, 10).clamp_(1e-2, 1 - 1e-2),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), p=2, reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multimarginloss_margin_no_reduce_test():
t = torch.rand(5).mul(8).floor().long()
return dict(
fullname='MultiMarginLoss_margin_no_reduce',
constructor=wrap_functional(
lambda i: F.multi_margin_loss(i, t.type_as(i).long(), margin=0.5, reduction='none')),
cpp_function_call='''F::multi_margin_loss(
i, t.to(i.options()).to(torch::kLong),
F::MultiMarginLossFuncOptions().margin(0.5).reduction(torch::kNone))''',
input_fn=lambda: torch.randn(5, 10),
cpp_var_map={'i': '_get_input()', 't': t},
reference_fn=lambda i, *_:
loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(),
margin=0.5, reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multimarginloss_weights_no_reduce_test():
t = torch.rand(5).mul(8).floor().long()
weights = torch.rand(10)
return dict(
fullname='MultiMarginLoss_weights_no_reduce',
constructor=wrap_functional(
lambda i: F.multi_margin_loss(i, t.type_as(i).long(), weight=weights.type_as(i),
reduction='none')),
cpp_function_call='''F::multi_margin_loss(
i, t.to(i.options()).to(torch::kLong),
F::MultiMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''',
input_fn=lambda: torch.randn(5, 10),
cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights},
reference_fn=lambda i, *_:
loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(),
weight=weights, reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def fractional_max_pool2d_test(test_case, return_indices=False):
random_samples = torch.empty((1, 3, 2), dtype=torch.double).uniform_()
if test_case == 'ratio':
out = dict(
constructor=lambda: nn.FractionalMaxPool2d(
2, output_ratio=0.5, _random_samples=random_samples, return_indices=return_indices),
cpp_constructor_args='''torch::nn::FractionalMaxPool2dOptions(2)
.output_ratio(0.5)
._random_samples(random_samples)''',
input_size=(1, 3, 5, 7),
cpp_var_map={'random_samples': random_samples},
fullname='FractionalMaxPool2d_ratio')
elif test_case == 'size':
out = dict(
constructor=lambda: nn.FractionalMaxPool2d((2, 3), output_size=(
4, 3), _random_samples=random_samples, return_indices=return_indices),
cpp_constructor_args='''torch::nn::FractionalMaxPool2dOptions({2, 3})
.output_size(std::vector<int64_t>({4, 3}))
._random_samples(random_samples)''',
input_size=(1, 3, 7, 6),
cpp_var_map={'random_samples': random_samples},
fullname='FractionalMaxPool2d_size')
if return_indices:
# to get the return_indices behavior we have to call
# `forward_with_indices` in C++ and the return type switches from
# Tensor to tuple<Tensor, Tensor> which complicates testing considerably.
out['test_cpp_api_parity'] = False
out['fullname'] = '%s_return_indices' % out['fullname']
return out
def fractional_max_pool2d_no_batch_dim_test(test_case, use_random_samples):
if use_random_samples:
# random_samples enables CPU and GPU checks to be consistent
random_samples = torch.empty((1, 3, 2), dtype=torch.double).uniform_()
if test_case == 'ratio':
return dict(
constructor=lambda: nn.FractionalMaxPool2d(
2, output_ratio=0.5, _random_samples=random_samples),
cpp_constructor_args='''torch::nn::FractionalMaxPool2dOptions(2)
.output_ratio(0.5)
._random_samples(random_samples)''',
input_size=(3, 5, 7),
cpp_var_map={'random_samples': random_samples},
reference_fn=single_batch_reference_fn,
fullname='FractionalMaxPool2d_ratio_no_batch_dim')
elif test_case == 'size':
return dict(
constructor=lambda: nn.FractionalMaxPool2d((2, 3), output_size=(
4, 3), _random_samples=random_samples),
cpp_constructor_args='''torch::nn::FractionalMaxPool2dOptions({2, 3})
.output_size(std::vector<int64_t>({4, 3}))
._random_samples(random_samples)''',
input_size=(3, 7, 6),
cpp_var_map={'random_samples': random_samples},
reference_fn=single_batch_reference_fn,
fullname='FractionalMaxPool2d_size_no_batch_dim')
else:
# cannot check CUDA because the RNG differs between CPU and CUDA
if test_case == 'ratio':
return dict(
constructor=lambda: nn.FractionalMaxPool2d(
2, output_ratio=0.5),
cpp_constructor_args='''torch::nn::FractionalMaxPool2dOptions(2)
.output_ratio(0.5)''',
input_size=(3, 5, 7),
reference_fn=single_batch_reference_fn,
test_cuda=False,
fullname='FractionalMaxPool2d_ratio_no_batch_dim_no_random_samples')
elif test_case == 'size':
return dict(
constructor=lambda: nn.FractionalMaxPool2d((2, 3), output_size=(
4, 3)),
cpp_constructor_args='''torch::nn::FractionalMaxPool2dOptions({2, 3})
.output_size(std::vector<int64_t>({4, 3}))''',
input_size=(3, 7, 6),
reference_fn=single_batch_reference_fn,
test_cuda=False,
fullname='FractionalMaxPool2d_size_no_batch_dim_no_random_samples')
def fractional_max_pool3d_test(test_case, return_indices=False):
random_samples = torch.empty((2, 4, 3), dtype=torch.double).uniform_()
if test_case == 'ratio':
out = dict(
constructor=lambda: nn.FractionalMaxPool3d(
2, output_ratio=0.5, _random_samples=random_samples, return_indices=return_indices),
cpp_constructor_args='''torch::nn::FractionalMaxPool3dOptions(2)
.output_ratio(0.5)
._random_samples(random_samples)''',
input_size=(2, 4, 5, 5, 5),
cpp_var_map={'random_samples': random_samples},
fullname='FractionalMaxPool3d_ratio')
elif test_case == 'size':
out = dict(
constructor=lambda: nn.FractionalMaxPool3d((2, 2, 2), output_size=(
4, 4, 4), _random_samples=random_samples, return_indices=return_indices),
cpp_constructor_args='''torch::nn::FractionalMaxPool3dOptions({2, 2, 2})
.output_size(std::vector<int64_t>({4, 4, 4}))
._random_samples(random_samples)''',
input_size=(2, 4, 7, 7, 7),
cpp_var_map={'random_samples': random_samples},
fullname='FractionalMaxPool3d_size')
elif test_case == 'asymsize':
out = dict(
constructor=lambda: nn.FractionalMaxPool3d((4, 2, 3), output_size=(
10, 3, 2), _random_samples=random_samples, return_indices=return_indices),
cpp_constructor_args='''torch::nn::FractionalMaxPool3dOptions({4, 2, 3})
.output_size(std::vector<int64_t>({10, 3, 2}))
._random_samples(random_samples)''',
input_size=(2, 4, 16, 7, 5),
cpp_var_map={'random_samples': random_samples},
fullname='FractionalMaxPool3d_asymsize')
if return_indices:
# to get the return_indices behavior we have to call
# `forward_with_indices` in C++ and the return type switches from
# Tensor to tuple<Tensor, Tensor> which complicates testing considerably.
out['test_cpp_api_parity'] = False
out['fullname'] = '%s_return_indices' % out['fullname']
return out
def fractional_max_pool3d_no_batch_dim_test(test_case, use_random_samples):
if use_random_samples:
# random_samples enables CPU and GPU checks to be consistent
random_samples = torch.empty((2, 4, 3), dtype=torch.double).uniform_()
if test_case == 'ratio':
return dict(
constructor=lambda: nn.FractionalMaxPool3d(
2, output_ratio=0.5, _random_samples=random_samples),
cpp_constructor_args='''torch::nn::FractionalMaxPool3dOptions(2)
.output_ratio(0.5)
._random_samples(random_samples)''',
input_size=(4, 5, 5, 5),
cpp_var_map={'random_samples': random_samples},
reference_fn=single_batch_reference_fn,
fullname='FractionalMaxPool3d_ratio_no_batch_dim')
elif test_case == 'size':
return dict(
constructor=lambda: nn.FractionalMaxPool3d((2, 2, 2), output_size=(
4, 4, 4), _random_samples=random_samples),
cpp_constructor_args='''torch::nn::FractionalMaxPool3dOptions({2, 2, 2})
.output_size(std::vector<int64_t>({4, 4, 4}))
._random_samples(random_samples)''',
input_size=(4, 7, 7, 7),
cpp_var_map={'random_samples': random_samples},
reference_fn=single_batch_reference_fn,
fullname='FractionalMaxPool3d_size_no_batch_dim')
else:
# cannot check CUDA because the RNG differs between CPU and CUDA
if test_case == 'ratio':
return dict(
constructor=lambda: nn.FractionalMaxPool3d(
2, output_ratio=0.5),
cpp_constructor_args='''torch::nn::FractionalMaxPool3dOptions(2)
.output_ratio(0.5)''',
input_size=(4, 5, 5, 5),
reference_fn=single_batch_reference_fn,
test_cuda=False,
fullname='FractionalMaxPool3d_ratio_no_batch_dim_no_random_samples')
elif test_case == 'size':
return dict(
constructor=lambda: nn.FractionalMaxPool3d((2, 2, 2), output_size=(
4, 4, 4)),
cpp_constructor_args='''torch::nn::FractionalMaxPool3dOptions({2, 2, 2})
.output_size(std::vector<int64_t>({4, 4, 4}))''',
input_size=(4, 7, 7, 7),
reference_fn=single_batch_reference_fn,
test_cuda=False,
fullname='FractionalMaxPool3d_size_no_batch_dim_no_random_samples')
def single_batch_reference_fn(input, parameters, module):
"""Reference function for modules supporting no batch dimensions.
The module is passed the input and target in batched form with a single item.
The output is squeezed to compare with the no-batch input.
"""
def unsqueeze_inp(inp):
if isinstance(inp, (list, tuple)):
return [t.unsqueeze(0) for t in inp]
return inp.unsqueeze(0)
single_batch_input = unsqueeze_inp(input)
single_batch_input = [single_batch_input] if isinstance(single_batch_input, torch.Tensor) else single_batch_input
with freeze_rng_state():
return module(*single_batch_input).squeeze(0)
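
# Hedged sketch (illustrative only): for the `no_batch_dim` entries below,
# `single_batch_reference_fn` builds the reference output by unsqueezing a batch
# dimension, running the module, and squeezing the result back out.
def _single_batch_reference_example():
    module = nn.InstanceNorm1d(3)
    inp = torch.randn(3, 15)  # no batch dimension
    ref = single_batch_reference_fn(inp, None, module)
    assert torch.allclose(ref, module(inp.unsqueeze(0)).squeeze(0))
    return ref
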
new_module_tests = [
poissonnllloss_no_reduce_test(),
bceloss_no_reduce_test(),
bceloss_weights_no_reduce_test(),
bce_with_logistic_legacy_enum_test(),
bce_with_logistic_no_reduce_test(),
bceloss_no_reduce_scalar_test(),
bceloss_weights_no_reduce_scalar_test(),
bce_with_logistic_no_reduce_scalar_test(),
kldivloss_with_target_no_reduce_test(),
kldivloss_no_reduce_test(),
kldivloss_no_reduce_scalar_test(),
kldivloss_with_log_target_no_reduce_test(),
kldivloss_no_reduce_log_target_test(),
kldivloss_no_reduce_scalar_log_target_test(),
l1loss_no_reduce_test(),
l1loss_no_reduce_complex_test(),
l1loss_no_reduce_scalar_test(),
mseloss_no_reduce_test(),
mseloss_no_reduce_scalar_test(),
nllloss_no_reduce_test(),
nllloss_no_reduce_ignore_index_test(),
nllloss_no_reduce_weights_test(),
nllloss_no_reduce_weights_ignore_index_test(),
nllloss_no_reduce_weights_ignore_index_neg_test(),
nllloss2d_no_reduce_test(),
nllloss2d_no_reduce_weights_test(),
nllloss2d_no_reduce_ignore_index_test(),
nlllossNd_no_reduce_test(),
nlllossNd_no_reduce_weights_test(),
nlllossNd_no_reduce_ignore_index_test(),
smoothl1loss_no_reduce_test(),
smoothl1loss_no_reduce_scalar_test(),
smoothl1loss_beta_test(),
smoothl1loss_zero_beta_test(),
huberloss_delta_test(),
multilabelmarginloss_0d_no_reduce_test(),
multilabelmarginloss_1d_no_reduce_test(),
multilabelmarginloss_index_neg_test(),
multilabelmarginloss_no_reduce_test(),
hingeembeddingloss_no_reduce_test(),
hingeembeddingloss_margin_no_reduce_test(),
softmarginloss_no_reduce_test(),
multilabelsoftmarginloss_no_reduce_test(),
multilabelsoftmarginloss_weights_no_reduce_test(),
multimarginloss_no_reduce_test(),
multimarginloss_1d_no_reduce_test(),
multimarginloss_1d_input_0d_target_no_reduce_test(),
multimarginloss_p_no_reduce_test(),
multimarginloss_margin_no_reduce_test(),
multimarginloss_weights_no_reduce_test(),
fractional_max_pool2d_test('ratio'),
fractional_max_pool2d_test('size'),
fractional_max_pool2d_no_batch_dim_test('ratio', True),
fractional_max_pool2d_no_batch_dim_test('ratio', False),
fractional_max_pool2d_no_batch_dim_test('size', True),
fractional_max_pool2d_no_batch_dim_test('size', False),
fractional_max_pool2d_test('ratio', return_indices=True),
fractional_max_pool3d_test('ratio'),
fractional_max_pool3d_test('size'),
fractional_max_pool3d_test('asymsize'),
fractional_max_pool3d_test('ratio', return_indices=True),
fractional_max_pool3d_no_batch_dim_test('ratio', True),
fractional_max_pool3d_no_batch_dim_test('ratio', False),
fractional_max_pool3d_no_batch_dim_test('size', True),
fractional_max_pool3d_no_batch_dim_test('size', False),
dict(
module_name='BatchNorm1d',
constructor_args=(10,),
cpp_constructor_args='torch::nn::BatchNorm1dOptions(10)',
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc='affine',
),
dict(
module_name='BatchNorm1d',
constructor_args=(5,),
cpp_constructor_args='torch::nn::BatchNorm1dOptions(5)',
input_size=(4, 5, 3),
cudnn=True,
check_eval=True,
desc='3d_input',
),
dict(
module_name='BatchNorm1d',
constructor_args=(10, 1e-3, None),
cpp_constructor_args='torch::nn::BatchNorm1dOptions(10).eps(1e-3).momentum(c10::nullopt)',
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc='affine_simple_average',
),
dict(
module_name='BatchNorm1d',
constructor_args=(10, 1e-3, 0.3, False),
cpp_constructor_args='torch::nn::BatchNorm1dOptions(10).eps(1e-3).momentum(0.3).affine(false)',
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc='not_affine',
),
dict(
module_name='BatchNorm1d',
constructor_args=(10, 1e-3, 0.3, True, False),
cpp_constructor_args='''torch::nn::BatchNorm1dOptions(10)
.eps(1e-3).momentum(0.3).affine(true).track_running_stats(false)''',
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc='not_tracking_stats',
),
dict(
module_name='BatchNorm1d',
constructor_args=(5, 1e-3, 0.3, False),
cpp_constructor_args='torch::nn::BatchNorm1dOptions(5).eps(1e-3).momentum(0.3).affine(false)',
input_size=(4, 5, 3),
cudnn=True,
check_eval=True,
desc='3d_input_not_affine',
),
dict(
module_name='BatchNorm1d',
constructor_args=(5, 1e-3, 0.3, False),
cpp_constructor_args='torch::nn::BatchNorm1dOptions(5).eps(1e-3).momentum(0.3).affine(false)',
input_size=(0, 5, 9),
cudnn=True,
check_eval=True,
desc='zero_batch',
),
dict(
module_name='BatchNorm2d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::BatchNorm2dOptions(3)',
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
),
dict(
module_name='BatchNorm2d',
constructor_args=(3, 1e-3, None),
cpp_constructor_args='torch::nn::BatchNorm2dOptions(3).eps(1e-3).momentum(c10::nullopt)',
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='2d_simple_average',
),
dict(
module_name='BatchNorm2d',
constructor_args=(3, 1e-3, 0.8),
cpp_constructor_args='torch::nn::BatchNorm2dOptions(3).eps(1e-3).momentum(0.8)',
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='momentum',
),
dict(
module_name='BatchNorm2d',
constructor_args=(3, 1e-3, 0.8, False),
cpp_constructor_args='torch::nn::BatchNorm2dOptions(3).eps(1e-3).momentum(0.8).affine(false)',
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='not_affine',
),
dict(
module_name='BatchNorm2d',
constructor_args=(3, 1e-3, 0.8, True, False),
cpp_constructor_args='''torch::nn::BatchNorm2dOptions(3)
.eps(1e-3).momentum(0.8).affine(true).track_running_stats(false)''',
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='not_tracking_stats',
),
dict(
module_name='BatchNorm2d',
constructor_args=(5, 1e-3, 0.3, False),
cpp_constructor_args='torch::nn::BatchNorm2dOptions(5).eps(1e-3).momentum(0.3).affine(false)',
input_size=(0, 5, 2, 2),
cudnn=True,
check_eval=True,
desc='zero_batch',
),
dict(
module_name='BatchNorm3d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::BatchNorm3dOptions(3)',
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
),
dict(
module_name='BatchNorm3d',
constructor_args=(3, 1e-3, None),
cpp_constructor_args='torch::nn::BatchNorm3dOptions(3).eps(1e-3).momentum(c10::nullopt)',
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='3d_simple_average',
),
dict(
module_name='BatchNorm3d',
constructor_args=(3, 1e-3, 0.7),
cpp_constructor_args='torch::nn::BatchNorm3dOptions(3).eps(1e-3).momentum(0.7)',
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='momentum',
),
dict(
module_name='BatchNorm3d',
constructor_args=(3, 1e-3, 0.7, False),
cpp_constructor_args='torch::nn::BatchNorm3dOptions(3).eps(1e-3).momentum(0.7).affine(false)',
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='not_affine',
),
dict(
module_name='BatchNorm3d',
constructor_args=(3, 1e-3, 0.7, True, False),
cpp_constructor_args='''torch::nn::BatchNorm3dOptions(3)
.eps(1e-3).momentum(0.7).affine(true).track_running_stats(false)''',
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='not_tracking_stats',
),
dict(
module_name='BatchNorm3d',
constructor_args=(5, 1e-3, 0.3, False),
cpp_constructor_args='torch::nn::BatchNorm3dOptions(5).eps(1e-3).momentum(0.3).affine(false)',
input_size=(0, 5, 2, 2, 2),
cudnn=True,
check_eval=True,
desc='zero_batch',
),
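# InstanceNorm1d/2d/3d configurations. The positional order matches BatchNorm:
# (num_features, eps, momentum, affine, track_running_stats), but affine and
# track_running_stats default to False, so the (..., False, True) variants below are
# the ones that enable running statistics. The '*_no_batch_dim' entries pass an
# unbatched input and are checked against single_batch_reference_fn, which (as the
# name suggests) runs the module on the same input with a leading batch dimension added.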
dict(
module_name='InstanceNorm1d',
constructor_args=(3, 1e-3, 0.3),
cpp_constructor_args='torch::nn::InstanceNorm1dOptions(3).eps(1e-3).momentum(0.3)',
input_size=(4, 3, 15),
cudnn=True,
check_eval=True,
),
dict(
module_name='InstanceNorm1d',
constructor_args=(3, 1e-3, 0.3, False, True),
cpp_constructor_args='''torch::nn::InstanceNorm1dOptions(3)
.eps(1e-3).momentum(0.3).affine(false).track_running_stats(true)''',
input_size=(4, 3, 15),
cudnn=True,
check_eval=True,
desc='tracking_stats',
),
dict(
module_name='InstanceNorm1d',
constructor_args=(3, 1e-3, 0.3, False, True),
cpp_constructor_args='''torch::nn::InstanceNorm1dOptions(3)
.eps(1e-3).momentum(0.3).affine(false).track_running_stats(true)''',
input_size=(3, 15),
cudnn=True,
check_eval=True,
reference_fn=single_batch_reference_fn,
desc='tracking_stats_no_batch_dim',
),
dict(
module_name='InstanceNorm1d',
constructor_args=(3, 1e-3, 0.3),
cpp_constructor_args='torch::nn::InstanceNorm1dOptions(3).eps(1e-3).momentum(0.3)',
input_size=(3, 15),
cudnn=True,
check_eval=True,
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='InstanceNorm2d',
constructor_args=(3, 1e-3, 0.3),
cpp_constructor_args='torch::nn::InstanceNorm2dOptions(3).eps(1e-3).momentum(0.3)',
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
),
dict(
module_name='InstanceNorm2d',
constructor_args=(3, 1e-3, 0.3, False, True),
cpp_constructor_args='''torch::nn::InstanceNorm2dOptions(3)
.eps(1e-3).momentum(0.3).affine(false).track_running_stats(true)''',
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='tracking_stats',
),
dict(
module_name='InstanceNorm2d',
constructor_args=(3, 1e-3, 0.3),
cpp_constructor_args='torch::nn::InstanceNorm2dOptions(3).eps(1e-3).momentum(0.3)',
input_size=(3, 6, 6),
cudnn=True,
check_eval=True,
reference_fn=single_batch_reference_fn,
desc='no_batch_dim'
),
dict(
module_name='InstanceNorm2d',
constructor_args=(3, 1e-3, 0.3, False, True),
cpp_constructor_args='''torch::nn::InstanceNorm2dOptions(3)
.eps(1e-3).momentum(0.3).affine(false).track_running_stats(true)''',
input_size=(3, 6, 6),
cudnn=True,
check_eval=True,
reference_fn=single_batch_reference_fn,
desc='tracking_stats_no_batch_dim',
),
dict(
module_name='InstanceNorm3d',
constructor_args=(3, 1e-3, 0.3),
cpp_constructor_args='torch::nn::InstanceNorm3dOptions(3).eps(1e-3).momentum(0.3)',
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
),
dict(
module_name='InstanceNorm3d',
constructor_args=(3, 1e-3, 0.3, False, True),
cpp_constructor_args='''torch::nn::InstanceNorm3dOptions(3)
.eps(1e-3).momentum(0.3).affine(false).track_running_stats(true)''',
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='tracking_stats',
),
dict(
module_name='InstanceNorm3d',
constructor_args=(3, 1e-3, 0.3),
cpp_constructor_args='torch::nn::InstanceNorm3dOptions(3).eps(1e-3).momentum(0.3)',
input_size=(3, 4, 4, 4),
cudnn=True,
check_eval=True,
reference_fn=single_batch_reference_fn,
desc='no_batch_dim'
),
dict(
module_name='InstanceNorm3d',
constructor_args=(3, 1e-3, 0.3, False, True),
cpp_constructor_args='''torch::nn::InstanceNorm3dOptions(3)
.eps(1e-3).momentum(0.3).affine(false).track_running_stats(true)''',
input_size=(3, 4, 4, 4),
cudnn=True,
check_eval=True,
reference_fn=single_batch_reference_fn,
desc='tracking_stats_no_batch_dim',
),
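# LayerNorm configurations: (normalized_shape, eps, elementwise_affine). The shape is a
# list, so e.g. [2, 2, 5] normalizes over the last three dimensions of a (4, 2, 2, 5)
# input; the '1d_empty_elementwise_affine' entry uses a zero-size batch.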
dict(
module_name='LayerNorm',
constructor_args=([5], 1e-3),
cpp_constructor_args='torch::nn::LayerNormOptions({5}).eps(1e-3)',
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
check_half=True,
desc='1d_elementwise_affine',
),
dict(
module_name='LayerNorm',
constructor_args=([5], 1e-3, False),
cpp_constructor_args='torch::nn::LayerNormOptions({5}).eps(1e-3).elementwise_affine(false)',
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
check_half=True,
desc='1d_no_elementwise_affine',
),
dict(
module_name='LayerNorm',
constructor_args=([2, 2, 5], 1e-3),
cpp_constructor_args='torch::nn::LayerNormOptions({2, 2, 5}).eps(1e-3)',
input_size=(4, 2, 2, 5),
cudnn=True,
check_eval=True,
check_half=True,
desc='3d_elementwise_affine',
),
dict(
module_name='LayerNorm',
constructor_args=([2, 2, 5], 1e-3, False),
cpp_constructor_args='torch::nn::LayerNormOptions({2, 2, 5}).eps(1e-3).elementwise_affine(false)',
input_size=(4, 2, 2, 5),
cudnn=True,
check_eval=True,
check_half=True,
desc='3d_no_elementwise_affine',
),
dict(
module_name='LayerNorm',
constructor_args=([56, 56, 56], 1e-5, False),
cpp_constructor_args='torch::nn::LayerNormOptions({56, 56, 56}).eps(1e-5).elementwise_affine(false)',
input_size=(4, 56, 56, 56),
cudnn=True,
check_eval=True,
gradcheck_fast_mode=True,
check_half=True,
desc='3d_no_affine_large_feature',
),
dict(
module_name='LayerNorm',
constructor_args=([5], 1e-3),
cpp_constructor_args='torch::nn::LayerNormOptions({5}).eps(1e-3)',
input_size=(0, 5),
cudnn=True,
check_eval=True,
check_half=True,
desc='1d_empty_elementwise_affine',
),
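# GroupNorm configurations: (num_groups, num_channels, eps, affine). Two boundary cases
# are covered below: num_groups == num_channels, which normalizes each channel on its
# own like InstanceNorm, and num_groups == 1, which normalizes over all channels
# together like LayerNorm (see the per-entry comments).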
dict(
module_name='GroupNorm',
constructor_args=(3, 6, 1e-3),
cpp_constructor_args='torch::nn::GroupNormOptions(3, 6).eps(1e-3)',
input_size=(4, 6, 5),
cudnn=True,
check_eval=True,
check_bfloat16=True,
desc='1d_affine',
),
dict(
module_name='GroupNorm',
constructor_args=(3, 12, 1e-3),
cpp_constructor_args='torch::nn::GroupNormOptions(3, 12).eps(1e-3)',
input_size=(4, 12),
cudnn=True,
check_eval=True,
check_bfloat16=True,
desc='1d_affine_GN',
),
dict(
module_name='GroupNorm',
constructor_args=(1, 6, 1e-3),
cpp_constructor_args='torch::nn::GroupNormOptions(1, 6).eps(1e-3)',
input_size=(150, 6),
cudnn=True,
check_eval=True,
desc='1d_affine_large_batch', # For large batch_size
check_bfloat16=True,
test_cpu=False,
),
dict(
module_name='GroupNorm',
constructor_args=(5, 5, 1e-3, False),
cpp_constructor_args='torch::nn::GroupNormOptions(5, 5).eps(1e-3).affine(false)',
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
check_bfloat16=True,
desc='1d_no_affine_IN',  # this setting is equivalent to InstanceNorm
),
dict(
module_name='GroupNorm',
constructor_args=(1, 10, 1e-3, False),
cpp_constructor_args='torch::nn::GroupNormOptions(1, 10).eps(1e-3).affine(false)',
input_size=(4, 10),
cudnn=True,
check_eval=True,
check_bfloat16=True,
desc='1d_no_affine_LN',  # this setting is equivalent to LayerNorm
),
dict(
module_name='GroupNorm',
constructor_args=(3, 6, 1e-3),
cpp_constructor_args='torch::nn::GroupNormOptions(3, 6).eps(1e-3)',
input_size=(4, 6, 2, 3),
cudnn=True,
check_eval=True,
check_bfloat16=True,
desc='2d_affine',
),
dict(
module_name='GroupNorm',
constructor_args=(3, 6, 1e-3),
cpp_constructor_args='torch::nn::GroupNormOptions(3, 6).eps(1e-3)',
input_size=(4, 6, 28, 28),
cudnn=True,
check_eval=True,
check_bfloat16=True,
desc='2d_affine_large_feature',
test_cpu=False,
),
dict(
module_name='GroupNorm',
constructor_args=(3, 51, 1e-5, False),
cpp_constructor_args='torch::nn::GroupNormOptions(3, 51).eps(1e-5).affine(false)',
input_size=(2, 51, 28, 28),
cudnn=True,
check_eval=True,
check_bfloat16=True,
desc='2d_no_affine_large_feature',
test_cpu=False,
),
dict(
module_name='GroupNorm',
constructor_args=(3, 3, 1e-3, False),
cpp_constructor_args='torch::nn::GroupNormOptions(3, 3).eps(1e-3).affine(false)',
input_size=(4, 3, 2, 3),
cudnn=True,
check_eval=True,
check_bfloat16=True,
desc='2d_no_affine_IN',  # this setting is equivalent to InstanceNorm
),
dict(
module_name='GroupNorm',
constructor_args=(1, 3, 1e-3, False),
cpp_constructor_args='torch::nn::GroupNormOptions(1, 3).eps(1e-3).affine(false)',
input_size=(4, 3, 2, 3),
cudnn=True,
check_eval=True,
check_bfloat16=True,
desc='2d_no_affine_LN',  # this setting is equivalent to LayerNorm
),
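# Conv1d / ConvTranspose1d configurations. Positional constructor_args follow
# (in_channels, out_channels, kernel_size, stride, padding, ...); the string padding
# modes "valid" and "same" are spelled torch::kValid / torch::kSame on the C++ side.
# with_tf32=True marks entries whose comparison tolerance is loosened to
# tf32_precision when the test runs with TF32 math enabled on CUDA.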
dict(
module_name='Conv1d',
constructor_args=(4, 5, 3),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3)',
input_size=(2, 4, 10),
cudnn=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Conv1d',
constructor_args=(4, 5, 3, 2),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).stride(2)',
input_size=(2, 4, 10),
cudnn=True,
desc='stride',
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Conv1d',
constructor_args=(4, 5, 3, 1, 1),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).stride(1).padding(1)',
input_size=(2, 4, 10),
cudnn=True,
desc='pad1',
with_tf32=True,
tf32_precision=0.01,
),
dict(
module_name='Conv1d',
constructor_args=(4, 5, 5, 1, 2),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 5).stride(1).padding(2)',
input_size=(2, 4, 10),
cudnn=True,
desc='pad2',
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Conv1d',
constructor_args=(4, 4, 3, 1, 1),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 4, 3).stride(1).padding(1)',
input_size=(1, 4, 1),
cudnn=True,
desc='pad1size1',
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Conv1d',
constructor_args=(4, 4, 5, 1, 2),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 4, 5).stride(1).padding(2)',
input_size=(1, 4, 1),
cudnn=True,
desc='pad2size1',
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Conv1d',
constructor_args=(4, 5, 3),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3)',
input_size=(0, 4, 10),
cudnn=True,
desc='zero_batch',
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv1d_dilated',
constructor=lambda: nn.Conv1d(4, 5, kernel_size=3, dilation=2),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).dilation(2)',
input_size=(2, 4, 10),
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv1d_groups',
constructor=lambda: nn.Conv1d(4, 6, kernel_size=3, groups=2),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 6, 3).groups(2)',
input_size=(2, 4, 6),
cudnn=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv1d_pad_valid',
constructor=lambda: nn.Conv1d(4, 5, 3, padding="valid"),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kValid)',
input_size=(2, 4, 10),
cudnn=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv1d_pad_same',
constructor=lambda: nn.Conv1d(4, 5, 3, padding="same"),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kSame)',
input_size=(2, 4, 10),
cudnn=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv1d_pad_same2',
constructor=lambda: nn.Conv1d(4, 5, 4, padding="same"),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 4).padding(torch::kSame)',
input_size=(2, 4, 10),
cudnn=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv1d_pad_same_dilated',
constructor=lambda: nn.Conv1d(4, 5, 4, padding="same", dilation=2),
cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 4).padding(torch::kSame).dilation(2)',
input_size=(2, 4, 10),
cudnn=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='ConvTranspose1d',
constructor=lambda: nn.ConvTranspose1d(3, 4, kernel_size=3, stride=(3,), padding=1, output_padding=(1,)),
cpp_constructor_args='torch::nn::ConvTranspose1dOptions(3, 4, 3).stride(3).padding(1).output_padding(1)',
cudnn=True,
input_size=(1, 3, 7),
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='ConvTranspose1d',
constructor_args=(3, 4, 3, 2, 1, 1, 1, False),
cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(3, 4, 3)
.stride(2).padding(1).output_padding(1).groups(1).bias(false)''',
input_size=(1, 3, 6),
cudnn=True,
desc='no_bias',
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='ConvTranspose1d',
constructor_args=(3, 4, 3, 2, 1, 1, 1, True, 2),
cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(3, 4, 3)
.stride(2).padding(1).output_padding(1).groups(1).bias(true).dilation(2)''',
input_size=(1, 3, 6),
cudnn=True,
desc='dilated',
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='ConvTranspose1d_groups',
constructor=lambda: nn.ConvTranspose1d(4, 6, 3, stride=(3,), padding=1, output_padding=(1,), groups=2),
cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(4, 6, 3)
.stride(3).padding(1).output_padding(1).groups(2)''',
cudnn=True,
input_size=(2, 4, 7),
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='MaxPool1d',
constructor_args=(4,),
cpp_constructor_args='torch::nn::MaxPool1dOptions(4)',
input_size=(2, 10, 4),
),
dict(
module_name='MaxPool1d',
constructor_args=(4, 4),
cpp_constructor_args='torch::nn::MaxPool1dOptions(4).stride(4)',
input_size=(2, 10, 4),
desc='stride',
),
dict(
module_name='MaxPool1d',
fullname='MaxPool1d_return_indices',
constructor=lambda: nn.MaxPool1d(4, return_indices=True),
input_size=(2, 10, 4),
test_cpp_api_parity=False,
),
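# Conv2d configurations. Tuple arguments such as (3, 2) map to brace-initializer lists
# ({3, 2}) in the C++ options. The 'depthwise' entries set groups equal to in_channels
# (e.g. nn.Conv2d(4, 4, (3, 3), groups=4)), and the '*_return_indices' pooling entries
# skip the C++ parity check (test_cpp_api_parity=False), presumably because those
# modules return a (values, indices) tuple.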
dict(
module_name='Conv2d',
constructor_args=(3, 4, (3, 2)),
cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 2})',
input_size=(2, 3, 7, 5),
cudnn=True,
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Conv2d',
constructor_args=(3, 4, (3, 3), (2, 2)),
cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2})',
input_size=(2, 3, 6, 6),
cudnn=True,
desc='strided',
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Conv2d',
constructor_args=(3, 4, (3, 3), (2, 2), (1, 1)),
cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2}).padding({1, 1})',
input_size=(2, 3, 6, 6),
cudnn=True,
desc='padding',
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Conv2d',
constructor_args=(3, 2, (3, 3), (2, 2), (1, 1), (2, 2)),
cpp_constructor_args='torch::nn::Conv2dOptions(3, 2, {3, 3}).stride({2, 2}).padding({1, 1}).dilation({2, 2})',
input_size=(2, 3, 8, 8),
cudnn=True,
desc='dilated',
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Conv2d',
constructor_args=(3, 4, (3, 2), 1, 0, 1, 1, False),
cpp_constructor_args='''torch::nn::Conv2dOptions(3, 4, {3, 2})
.stride(1).padding(0).dilation(1).groups(1).bias(false)''',
input_size=(2, 3, 6, 5),
cudnn=True,
desc='no_bias',
check_with_long_tensor=True,
with_tf32=True,
),
dict(
module_name='Conv2d',
constructor_args=(3, 4, (3, 2)),
cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 2})',
input_size=(0, 3, 7, 5),
cudnn=True,
desc='zero_batch',
check_with_long_tensor=True,
with_tf32=True,
),
dict(
fullname='Conv2d_groups',
constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2),
cpp_constructor_args='torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)',
input_size=(2, 4, 6, 5),
cudnn=True,
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv2d_groups_thnn',
constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2),
cpp_constructor_args='torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)',
input_size=(2, 4, 6, 5),
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv2d_pad_valid',
constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="valid"),
cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kValid)',
input_size=(2, 2, 6, 5),
cudnn=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv2d_pad_same',
constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="same"),
cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kSame)',
input_size=(2, 2, 6, 5),
cudnn=True,
with_tf32=True,
tf32_precision=0.01,
),
dict(
fullname='Conv2d_pad_same_dilated',
constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="same", dilation=2),
cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kSame).dilation(2)',
input_size=(2, 2, 6, 5),
cudnn=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='ConvTranspose2d',
constructor_args=(3, 4, 3, (3, 2), 1, (1, 1)),
cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3)
.stride({3, 2}).padding(1).output_padding({1, 1})''',
cudnn=True,
input_size=(1, 3, 7, 6),
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.01,
),
dict(
module_name='ConvTranspose2d',
constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False, (2, 2)),
cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3)
.stride({2, 3})
.padding(1)
.output_padding({1, 1})
.groups(1)
.bias(false)
.dilation({2, 2})''',
input_size=(1, 3, 6, 7),
cudnn=True,
desc='dilated',
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='ConvTranspose2d',
constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False),
cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3)
.stride({2, 3}).padding(1).output_padding({1, 1}).groups(1).bias(false)''',
input_size=(1, 3, 6, 7),
cudnn=True,
desc='no_bias',
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='ConvTranspose2d_groups',
constructor=lambda: nn.ConvTranspose2d(2, 4, (2, 3), groups=2),
cpp_constructor_args='torch::nn::ConvTranspose2dOptions(2, 4, {2, 3}).groups(2)',
input_size=(1, 2, 4, 5),
cudnn=True,
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.01,
),
dict(
fullname='Conv2d_depthwise',
constructor=lambda: nn.Conv2d(4, 4, (3, 3), groups=4),
cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).groups(4)',
input_size=(2, 4, 6, 6),
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv2d_depthwise_with_multiplier',
constructor=lambda: nn.Conv2d(4, 8, (3, 3), groups=4),
cpp_constructor_args='torch::nn::Conv2dOptions(4, 8, {3, 3}).groups(4)',
input_size=(2, 4, 6, 6),
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv2d_depthwise_strided',
constructor=lambda: nn.Conv2d(4, 4, (3, 3), stride=(2, 2), groups=4),
cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).stride({2, 2}).groups(4)',
input_size=(2, 4, 6, 6),
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv2d_depthwise_padded',
constructor=lambda: nn.Conv2d(4, 4, (3, 3), padding=(1, 1), groups=4),
cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).padding({1, 1}).groups(4)',
input_size=(2, 4, 6, 6),
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv2d_depthwise_dilated',
constructor=lambda: nn.Conv2d(4, 4, (2, 2), dilation=(2, 2), groups=4),
cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {2, 2}).dilation({2, 2}).groups(4)',
input_size=(2, 4, 5, 5),
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='MaxPool2d',
constructor_args=((3, 3), (2, 2), (1, 1)),
cpp_constructor_args='torch::nn::MaxPool2dOptions({3, 3}).stride({2, 2}).padding({1, 1})',
input_size=(3, 7, 7),
desc='3d_input'
),
dict(
module_name='MaxPool2d',
constructor_args=((3, 3), (2, 2), (1, 1)),
cpp_constructor_args='torch::nn::MaxPool2dOptions({3, 3}).stride({2, 2}).padding({1, 1})',
input_size=(1, 3, 7, 7),
check_with_channels_last=True,
desc='4d_input'
),
dict(
module_name='MaxPool2d',
fullname='MaxPool2d_return_indices',
constructor=lambda: nn.MaxPool2d((3, 3), (2, 2), (1, 1), return_indices=True),
input_size=(1, 3, 7, 7),
check_with_channels_last=True,
test_cpp_api_parity=False,
),
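# AvgPool1d/2d configurations: (kernel_size, stride, padding). The 'AvgPool*_divisor*'
# entries construct the module through a lambda so divisor_override can be passed by
# keyword; divisor_override=1 turns the pooled output into a plain sum over each window.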
dict(
module_name='AvgPool1d',
constructor_args=(2,),
cpp_constructor_args='torch::nn::AvgPool1dOptions(2)',
input_size=(2, 3, 6),
),
dict(
module_name='AvgPool1d',
constructor_args=((2,), (2,)),
cpp_constructor_args='torch::nn::AvgPool1dOptions(2).stride(2)',
input_size=(2, 3, 6),
desc='stride',
),
dict(
module_name='AvgPool1d',
constructor_args=(2, 2, 1),
cpp_constructor_args='torch::nn::AvgPool1dOptions(2).stride(2).padding(1)',
input_size=(2, 3, 6),
desc='stride_pad',
),
dict(
module_name='AvgPool1d',
constructor_args=(2,),
cpp_constructor_args='torch::nn::AvgPool1dOptions(2)',
input_size=(3, 6),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='AvgPool2d',
constructor_args=((2, 2),),
cpp_constructor_args='torch::nn::AvgPool2dOptions({2, 2})',
input_size=(2, 3, 6, 6),
),
dict(
module_name='AvgPool2d',
constructor_args=((2, 2),),
cpp_constructor_args='torch::nn::AvgPool2dOptions({2, 2})',
input_size=(3, 6, 6),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim'
),
dict(
module_name='AvgPool2d',
constructor_args=((2, 2), (2, 2)),
cpp_constructor_args='torch::nn::AvgPool2dOptions({2, 2}).stride({2, 2})',
input_size=(2, 3, 6, 6),
desc='stride',
),
dict(
module_name='AvgPool2d',
constructor_args=((2, 2), (2, 2), (1, 1)),
cpp_constructor_args='torch::nn::AvgPool2dOptions({2, 2}).stride({2, 2}).padding({1, 1})',
input_size=(2, 3, 6, 6),
desc='stride_pad',
),
dict(
fullname='AvgPool2d_divisor',
constructor=lambda: nn.AvgPool2d((2, 2), divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool2dOptions({2, 2}).divisor_override(1)',
input_size=(2, 3, 6, 6),
check_with_long_tensor=True,
),
dict(
fullname='AvgPool2d_divisor_stride',
constructor=lambda: nn.AvgPool2d((2, 2), (2, 2), divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool2dOptions({2, 2}).stride({2, 2}).divisor_override(1)',
input_size=(2, 3, 6, 6),
check_with_long_tensor=True,
),
dict(
fullname='AvgPool2d_divisor_stride_pad',
constructor=lambda: nn.AvgPool2d((2, 2), (2, 2), (1, 1), divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool2dOptions({2, 2}).stride({2, 2}).padding({1, 1}).divisor_override(1)',
input_size=(2, 3, 6, 6),
check_with_long_tensor=True,
),
dict(
module_name='LPPool2d',
constructor_args=(2, 2, 2),
cpp_constructor_args='torch::nn::LPPool2dOptions(2, 2).stride(2)',
input_size=(1, 3, 7, 7),
),
dict(
module_name='LPPool2d',
constructor_args=(1.5, 2),
cpp_constructor_args='torch::nn::LPPool2dOptions(1.5, 2)',
input_fn=lambda: torch.rand(1, 3, 7, 7),
desc='norm',
),
dict(
module_name='LPPool1d',
constructor_args=(1.5, 2),
cpp_constructor_args='torch::nn::LPPool1dOptions(1.5, 2)',
input_fn=lambda: torch.rand(1, 3, 7),
desc='norm',
),
dict(
module_name='LPPool1d',
constructor_args=(2, 2, 3),
cpp_constructor_args='torch::nn::LPPool1dOptions(2, 2).stride(3)',
input_size=(1, 3, 7),
),
dict(
module_name='LPPool1d',
constructor_args=(2, 2, 3),
cpp_constructor_args='torch::nn::LPPool1dOptions(2, 2).stride(3)',
input_size=(3, 7),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='LocalResponseNorm',
constructor_args=(3, ),
cpp_constructor_args='torch::nn::LocalResponseNormOptions(3)',
input_size=(1, 5, 7),
desc='1d',
),
dict(
module_name='LocalResponseNorm',
constructor_args=(2, ),
cpp_constructor_args='torch::nn::LocalResponseNormOptions(2)',
input_size=(1, 5, 7, 7),
desc='2d_uneven_pad',
),
dict(
module_name='LocalResponseNorm',
constructor_args=(1, 1., 0.5, 2.),
cpp_constructor_args='torch::nn::LocalResponseNormOptions(1).alpha(1.).beta(0.5).k(2.)',
input_size=(1, 5, 7, 7, 7),
desc='3d_custom_params',
),
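# Padding module configurations (ReflectionPad*, ReplicationPad*, ZeroPad2d,
# ConstantPad*). The constructor takes the padding amounts as a flat tuple ordered
# (left, right[, top, bottom[, front, back]]); ConstantPad* additionally takes the
# fill value. The 'complex' variants feed complex128 inputs and set skip_half so the
# half-precision pass is skipped for these inputs.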
dict(
module_name='ReflectionPad1d',
constructor_args=((1, 2),),
cpp_constructor_args='torch::nn::ReflectionPad1dOptions({1, 2})',
input_size=(2, 3, 8),
),
dict(
module_name='ReflectionPad1d',
constructor_args=((1, 2),),
cpp_constructor_args='torch::nn::ReflectionPad1dOptions({1, 2})',
input_size=(3, 8),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='ReflectionPad1d',
constructor_args=((1, 2),),
cpp_constructor_args='torch::nn::ReflectionPad1dOptions({1, 2})',
input_fn=lambda: torch.rand(2, 3, 8, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
dict(
module_name='ReflectionPad2d',
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args='torch::nn::ReflectionPad2dOptions({1, 2, 3, 4})',
input_size=(2, 3, 8, 8),
),
dict(
module_name='ReflectionPad2d',
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args='torch::nn::ReflectionPad2dOptions({1, 2, 3, 4})',
input_size=(3, 8, 8),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='ReflectionPad2d',
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args='torch::nn::ReflectionPad2dOptions({1, 2, 3, 4})',
input_fn=lambda: torch.rand(2, 3, 8, 8, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
dict(
module_name='ReflectionPad3d',
constructor_args=((1, 2, 0, 2, 1, 2),),
cpp_constructor_args='torch::nn::ReflectionPad3dOptions({1, 2, 0, 2, 1, 2})',
input_size=(2, 3, 8, 8, 8),
),
dict(
module_name='ReflectionPad3d',
constructor_args=((1, 2, 0, 2, 1, 2),),
cpp_constructor_args='torch::nn::ReflectionPad3dOptions({1, 2, 0, 2, 1, 2})',
input_size=(3, 8, 8, 8),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='ReflectionPad3d',
constructor_args=((1, 2, 0, 2, 1, 2),),
cpp_constructor_args='torch::nn::ReflectionPad3dOptions({1, 2, 0, 2, 1, 2})',
input_fn=lambda: torch.rand(2, 3, 8, 8, 8, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
dict(
module_name='ReplicationPad1d',
constructor_args=((1, 2),),
cpp_constructor_args='torch::nn::ReplicationPad1dOptions({1, 2})',
input_size=(2, 3, 4),
),
dict(
module_name='ReplicationPad1d',
constructor_args=((1, 2),),
cpp_constructor_args='torch::nn::ReplicationPad1dOptions({1, 2})',
input_size=(3, 4),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='ReplicationPad1d',
constructor_args=((1, 2),),
cpp_constructor_args='torch::nn::ReplicationPad1dOptions({1, 2})',
input_fn=lambda: torch.rand(2, 3, 4, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
dict(
module_name='ReplicationPad2d',
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args='torch::nn::ReplicationPad2dOptions({1, 2, 3, 4})',
input_size=(2, 3, 4, 4),
),
dict(
module_name='ReplicationPad2d',
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args='torch::nn::ReplicationPad2dOptions({1, 2, 3, 4})',
input_size=(3, 4, 4),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='ReplicationPad2d',
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args='torch::nn::ReplicationPad2dOptions({1, 2, 3, 4})',
input_fn=lambda: torch.rand(2, 3, 4, 4, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
dict(
module_name='ZeroPad2d',
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args='torch::nn::ZeroPad2dOptions({1, 2, 3, 4})',
input_size=(2, 3, 4, 4),
),
dict(
module_name='ZeroPad2d',
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args='torch::nn::ZeroPad2dOptions({1, 2, 3, 4})',
input_size=(3, 4, 4),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='ZeroPad2d',
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args='torch::nn::ZeroPad2dOptions({1, 2, 3, 4})',
input_fn=lambda: torch.rand(2, 3, 4, 4, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
dict(
module_name='ZeroPad2d',
constructor_args=((-1, -1, -1, -2),),
cpp_constructor_args='torch::nn::ZeroPad2dOptions({-1, -1, -1, -2})',
input_size=(2, 3, 4, 4),
desc='negative_dims'
),
dict(
module_name='ConstantPad1d',
constructor_args=((1, 2), 2.),
cpp_constructor_args='torch::nn::ConstantPad1dOptions({1, 2}, 2.)',
input_size=(2, 3, 4),
),
dict(
module_name='ConstantPad1d',
constructor_args=((1, 2), 2.),
cpp_constructor_args='torch::nn::ConstantPad1dOptions({1, 2}, 2.)',
input_size=(3, 4),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='ConstantPad1d',
constructor_args=((1, 2), 2.),
cpp_constructor_args='torch::nn::ConstantPad1dOptions({1, 2}, 2.)',
input_fn=lambda: torch.rand(2, 3, 4, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
dict(
module_name='ConstantPad2d',
constructor_args=((1, 2, 3, 4), 2.),
cpp_constructor_args='torch::nn::ConstantPad2dOptions({1, 2, 3, 4}, 2.)',
input_size=(2, 3, 4, 4),
),
dict(
module_name='ConstantPad2d',
constructor_args=((1, 2, 3, 4), 2.),
cpp_constructor_args='torch::nn::ConstantPad2dOptions({1, 2, 3, 4}, 2.)',
input_size=(3, 4, 4),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim'
),
dict(
module_name='ConstantPad2d',
constructor_args=((1, 2, 3, 4), 2.),
cpp_constructor_args='torch::nn::ConstantPad2dOptions({1, 2, 3, 4}, 2.)',
input_fn=lambda: torch.rand(2, 3, 4, 4, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
dict(
module_name='ConstantPad3d',
constructor_args=((1, 2, 3, 4, 1, 0), 2.),
cpp_constructor_args='torch::nn::ConstantPad3dOptions({1, 2, 3, 4, 1, 0}, 2.)',
input_size=(2, 3, 4, 4, 5),
),
dict(
module_name='ConstantPad3d',
constructor_args=((1, 2, 3, 4, 1, 0), 2.),
cpp_constructor_args='torch::nn::ConstantPad3dOptions({1, 2, 3, 4, 1, 0}, 2.)',
input_size=(3, 4, 4, 5),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim'
),
dict(
module_name='ConstantPad3d',
constructor_args=((1, 2, 3, 4, 1, 0), 2.),
cpp_constructor_args='torch::nn::ConstantPad3dOptions({1, 2, 3, 4, 1, 0}, 2.)',
input_fn=lambda: torch.rand(2, 3, 4, 4, 5, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
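# Conv3d / ConvTranspose3d configurations. These follow the same positional layout as
# the 1d/2d entries; the TF32 tolerance (tf32_precision) is noticeably looser here
# (0.05 vs. 0.005 for most 1d/2d entries), presumably because 3d convolutions
# accumulate over larger reduction windows.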
dict(
module_name='Conv3d',
constructor_args=(2, 3, (2, 3, 2)),
cpp_constructor_args='torch::nn::Conv3dOptions(2, 3, {2, 3, 2})',
input_size=(1, 2, 4, 5, 4),
cudnn=True,
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.05,
),
dict(
module_name='Conv3d',
constructor_args=(2, 3, (2, 3, 4), 1, 0, 1, 1, False),
cpp_constructor_args='''torch::nn::Conv3dOptions(2, 3, {2, 3, 4})
.stride(1).padding(0).dilation(1).groups(1).bias(false)''',
input_size=(1, 2, 3, 4, 5),
cudnn=True,
desc='no_bias',
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.05,
),
dict(
module_name='Conv3d',
constructor_args=(2, 3, (1, 1, 1), 1, 0, 1, 1, False),
cpp_constructor_args='''torch::nn::Conv3dOptions(2, 3, {1, 1, 1})
.stride(1).padding(0).dilation(1).groups(1).bias(false)''',
input_size=(1, 2, 3, 4, 5),
cudnn=True,
desc='1x1x1_no_bias',
check_with_long_tensor=False,
with_tf32=True,
tf32_precision=0.05,
),
dict(
module_name='Conv3d',
constructor_args=(3, 4, 2, 2),
cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).stride(2)',
input_size=(2, 3, 5, 5, 5),
cudnn=True,
desc='stride',
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.05,
),
dict(
module_name='Conv3d',
constructor_args=(3, 4, 2, 2, 1),
cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).stride(2).padding(1)',
input_size=(2, 3, 5, 5, 5),
cudnn=True,
desc='stride_padding',
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.05,
),
dict(
module_name='Conv3d',
constructor_args=(3, 4, (2, 3, 4)),
cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4})',
input_size=(0, 3, 3, 4, 5),
cudnn=True,
check_with_long_tensor=True,
desc='zero_batch',
with_tf32=True,
),
dict(
fullname='Conv3d_groups',
constructor=lambda: nn.Conv3d(2, 4, kernel_size=3, groups=2),
cpp_constructor_args='torch::nn::Conv3dOptions(2, 4, 3).groups(2)',
input_size=(1, 2, 4, 5, 4),
cudnn=True,
check_with_long_tensor=True,
with_tf32=True,
tf32_precision=0.005,
),
dict(
fullname='Conv3d_dilated',
constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2),
cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).dilation(2)',
input_size=(2, 3, 5, 5, 5),
with_tf32=True,
tf32_precision=0.05,
),
dict(
fullname='Conv3d_dilated_strided',
constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2, stride=2),
cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).dilation(2).stride(2)',
input_size=(2, 3, 5, 5, 5),
with_tf32=True,
tf32_precision=0.05
),
dict(
fullname='Conv3d_pad_valid',
constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="valid"),
cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kValid)',
input_size=(2, 3, 6, 5, 4),
cudnn=True,
with_tf32=True,
tf32_precision=0.05,
),
dict(
fullname='Conv3d_pad_same',
constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="same"),
cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kSame)',
input_size=(2, 3, 6, 5, 4),
cudnn=True,
with_tf32=True,
tf32_precision=0.05,
),
dict(
fullname='Conv3d_pad_same_dilated',
constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="same", dilation=2),
cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kSame).dilation(2)',
input_size=(2, 3, 6, 5, 4),
cudnn=True,
with_tf32=True,
tf32_precision=0.05,
),
dict(
module_name='ConvTranspose3d',
constructor_args=(2, 3, (2, 3, 2)),
cpp_constructor_args='torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2})',
cudnn=True,
input_size=(1, 2, 4, 5, 4),
with_tf32=True,
tf32_precision=0.05
),
dict(
module_name='ConvTranspose3d',
constructor_args=(2, 3, (2, 3, 2), 1, 0, 0, 1, True, (2, 2, 2)),
cpp_constructor_args='''torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2})
.stride(1).padding(0).output_padding(0).groups(1).bias(true).dilation({2, 2, 2})''',
cudnn=True,
input_size=(1, 2, 4, 5, 4),
desc='dilated',
with_tf32=True,
tf32_precision=0.05
),
dict(
module_name='MaxPool3d',
constructor_args=((2, 2, 2),),
cpp_constructor_args='torch::nn::MaxPool3dOptions({2, 2, 2})',
input_size=(2, 3, 5, 5, 5),
),
dict(
module_name='MaxPool3d',
constructor_args=(2, (2, 2, 2)),
cpp_constructor_args='torch::nn::MaxPool3dOptions(2).stride({2, 2, 2})',
input_size=(2, 3, 5, 5, 5),
desc='stride',
),
dict(
module_name='MaxPool3d',
constructor_args=(2, 2, (1, 1, 1)),
cpp_constructor_args='torch::nn::MaxPool3dOptions(2).stride(2).padding({1, 1, 1})',
input_size=(2, 3, 5, 5, 5),
desc='stride_padding',
),
dict(
module_name='MaxPool3d',
fullname='MaxPool3d_return_indices',
constructor=lambda: nn.MaxPool3d(2, 2, (1, 1, 1), return_indices=True),
input_size=(2, 3, 5, 5, 5),
test_cpp_api_parity=False,
),
dict(
module_name='AvgPool3d',
constructor_args=((2, 2, 2),),
cpp_constructor_args='torch::nn::AvgPool3dOptions({2, 2, 2})',
input_size=(2, 3, 4, 4, 4),
),
dict(
module_name='AvgPool3d',
constructor_args=((2, 2, 2),),
cpp_constructor_args='torch::nn::AvgPool3dOptions({2, 2, 2})',
input_size=(3, 4, 4, 4),
desc='no_batch_dim',
),
dict(
module_name='AvgPool3d',
constructor_args=(2, (2, 2, 2)),
cpp_constructor_args='torch::nn::AvgPool3dOptions(2).stride({2, 2, 2})',
input_size=(2, 3, 5, 5, 5),
desc='stride',
),
dict(
module_name='AvgPool3d',
constructor_args=(2, 2, (1, 1, 1)),
cpp_constructor_args='torch::nn::AvgPool3dOptions(2).stride(2).padding({1, 1, 1})',
input_size=(2, 3, 5, 5, 5),
desc='stride_pad',
),
dict(
module_name='AvgPool3d',
constructor_args=(4, 2, (1, 2, 1)),
cpp_constructor_args='torch::nn::AvgPool3dOptions(4).stride(2).padding({1, 2, 1})',
input_size=(2, 3, 5, 5, 5),
desc='stride_pad_gpu_fixedkw_output',
),
dict(
module_name='AvgPool3d',
constructor_args=((2, 4, 8), 1, (1, 1, 2)),
cpp_constructor_args='torch::nn::AvgPool3dOptions({2, 4, 8}).stride(1).padding({1, 1, 2})',
input_size=(2, 3, 2, 4, 8),
desc='stride_pad_gpu_general_output',
),
dict(
module_name='AvgPool3d',
constructor_args=(3, 1, 0),
cpp_constructor_args='torch::nn::AvgPool3dOptions(3).stride(1).padding(0)',
input_size=(2, 3, 4, 4, 4),
desc='stride1_pad0_gpu_input',
),
dict(
module_name='AvgPool3d',
constructor_args=(2, 2, (1, 1, 1)),
cpp_constructor_args='torch::nn::AvgPool3dOptions(2).stride(2).padding({1, 1, 1})',
input_size=(2, 3, 4, 4, 4),
desc='stride_pad_gpu_input_nooverlap',
),
dict(
fullname='AvgPool3d_divisor',
constructor=lambda: nn.AvgPool3d((2, 2, 2), divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool3dOptions({2, 2, 2}).divisor_override(1)',
input_size=(2, 3, 4, 4, 4),
check_with_long_tensor=True,
),
dict(
fullname='AvgPool3d_divisor_stride',
constructor=lambda: nn.AvgPool3d(2, (2, 2, 2), divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool3dOptions(2).stride({2, 2, 2}).divisor_override(1)',
input_size=(2, 3, 5, 5, 5),
check_with_long_tensor=True,
),
dict(
fullname='AvgPool3d_divisor_stride_pad',
constructor=lambda: nn.AvgPool3d(2, 2, (1, 1, 1), divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool3dOptions(2).stride(2).padding({1, 1, 1}).divisor_override(1)',
input_size=(2, 3, 5, 5, 5),
check_with_long_tensor=True,
),
dict(
fullname='AvgPool3d_divisor_stride_pad_gpu_fixedkw_output',
constructor=lambda: nn.AvgPool3d(4, 2, (1, 2, 1), divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool3dOptions(4).stride(2).padding({1, 2, 1}).divisor_override(1)',
input_size=(2, 3, 5, 5, 5),
check_with_long_tensor=True,
),
dict(
fullname='AvgPool3d_divisor_stride_pad_gpu_general_output',
constructor=lambda: nn.AvgPool3d((2, 4, 8), 1, (1, 1, 2), divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool3dOptions({2, 4, 8}).stride(1).padding({1, 1, 2}).divisor_override(1)',
input_size=(2, 3, 2, 4, 8),
check_with_long_tensor=True,
),
dict(
fullname='AvgPool3d_divisor_stride1_pad0_gpu_input',
constructor=lambda: nn.AvgPool3d(3, 1, 0, divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool3dOptions(3).stride(1).padding(0).divisor_override(1)',
input_size=(2, 3, 4, 4, 4),
check_with_long_tensor=True,
),
dict(
fullname='AvgPool3d_divisor_stride_pad_gpu_input_nooverlap',
constructor=lambda: nn.AvgPool3d(2, 2, (1, 1, 1), divisor_override=1),
cpp_constructor_args='torch::nn::AvgPool3dOptions(2).stride(2).padding({1, 1, 1}).divisor_override(1)',
input_size=(2, 3, 4, 4, 4),
check_with_long_tensor=True,
),
dict(
module_name='ReplicationPad3d',
constructor_args=((1, 2, 3, 3, 2, 1),),
cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})',
input_size=(2, 3, 2, 2, 2),
),
dict(
module_name='ReplicationPad3d',
constructor_args=((1, 2, 3, 3, 2, 1),),
cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})',
input_size=(3, 2, 2, 2),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='ReplicationPad3d',
constructor_args=((1, 2, 3, 3, 2, 1),),
cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})',
input_fn=lambda: torch.rand(2, 3, 2, 2, 2, dtype=torch.complex128, requires_grad=True),
skip_half=True,
desc='complex'
),
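# Embedding / EmbeddingBag configurations: (num_embeddings, embedding_dim, max_norm,
# norm_type, scale_grad_by_freq, mode). Inputs are integer index tensors drawn from
# [0, num_embeddings), so they are built with input_fn rather than input_size, and
# check_gradgrad is disabled for all of them. The 'sparse' entries also set
# has_sparse_gradients=True so the harness handles the sparse weight gradient.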
dict(
module_name='Embedding',
constructor_args=(4, 3),
cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3)',
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
check_gradgrad=False,
),
dict(
module_name='Embedding',
constructor_args=(4, 3),
cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3)',
input_fn=lambda: torch.empty(1, 512, dtype=torch.long).random_(4).expand(7, 512),
check_gradgrad=False,
desc='discontiguous'
),
dict(
module_name='EmbeddingBag',
constructor_args=(4, 3),
cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3)',
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
check_gradgrad=False,
desc='mean',
),
dict(
module_name='EmbeddingBag',
constructor_args=(4, 3),
cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3)',
input_fn=lambda: torch.empty(1, 512, dtype=torch.long).random_(4).expand(7, 512),
check_gradgrad=False,
desc='discontiguous',
),
dict(
module_name='EmbeddingBag',
constructor_args=(4, 3, None, 2., False, 'sum'),
cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3)
.max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kSum)''',
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
check_gradgrad=False,
desc='sum',
),
dict(
module_name='EmbeddingBag',
constructor_args=(4, 3, None, 2., False, 'max'),
cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3)
.max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kMax)''',
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
check_gradgrad=False,
desc='max',
),
dict(
fullname='EmbeddingBag_mean_padding_idx',
constructor=lambda: nn.EmbeddingBag(4, 3, padding_idx=1),
cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3).padding_idx(1)',
input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]),
check_gradgrad=False,
),
dict(
fullname='EmbeddingBag_sum_padding_idx',
constructor=lambda: nn.EmbeddingBag(4, 3, None, 2., False, 'sum', padding_idx=1),
cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3)
.max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kSum).padding_idx(1)''',
input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]),
check_gradgrad=False,
),
dict(
fullname='EmbeddingBag_max_padding_idx',
constructor=lambda: nn.EmbeddingBag(4, 3, None, 2., False, 'max', padding_idx=1),
cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3)
.max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kMax).padding_idx(1)''',
input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]),
check_gradgrad=False,
),
dict(
fullname='EmbeddingBag_sparse',
constructor=lambda: nn.EmbeddingBag(4, 3, sparse=True),
cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3).sparse(true)',
input_fn=lambda: torch.randperm(2).repeat(1, 2),
check_gradgrad=False,
has_sparse_gradients=True,
),
dict(
constructor=lambda: nn.Embedding(4, 3, sparse=True),
cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3).sparse(true)',
input_fn=lambda: torch.randperm(2).repeat(1, 2),
fullname='Embedding_sparse',
check_gradgrad=False,
has_sparse_gradients=True,
),
dict(
module_name='PixelShuffle',
constructor_args=(3,),
cpp_constructor_args='torch::nn::PixelShuffleOptions(3)',
input_size=(1, 9, 4, 4),
),
dict(
module_name='PixelUnshuffle',
constructor_args=(3,),
cpp_constructor_args='torch::nn::PixelUnshuffleOptions(3)',
input_size=(1, 1, 12, 12),
),
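# F.interpolate functional configurations. These wrap the functional with
# wrap_functional and describe the C++ side with cpp_options_args
# (F::InterpolateFuncOptions) instead of a module constructor. Exactly one of size or
# scale_factor is given per entry, and a scalar Python size such as 12 is written out
# per dimension on the C++ side (e.g. {12, 12} for the 2d cases). pickle=False opts
# these wrapped functionals out of the pickling round-trip.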
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12})).scale_factor(c10::nullopt).mode(torch::kNearest)''',
input_size=(1, 2, 4),
fullname='interpolate_nearest_1d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12})).scale_factor(c10::nullopt).mode(torch::kNearest)''',
input_size=(0, 2, 4),
fullname='interpolate_nearest_1d_zero_dim',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(12, ), scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12})).scale_factor(c10::nullopt).mode(torch::kNearest)''',
input_size=(1, 2, 3),
fullname='interpolate_nearest_tuple_1d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt).scale_factor(std::vector<double>({4.})).mode(torch::kNearest)''',
input_size=(1, 2, 4),
fullname='interpolate_nearest_scale_1d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12}))
.scale_factor(c10::nullopt)
.mode(torch::kLinear)
.align_corners(false)''',
input_size=(1, 2, 4),
fullname='interpolate_linear_1d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(4, ), scale_factor=None, mode='linear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4}))
.scale_factor(c10::nullopt)
.mode(torch::kLinear)
.align_corners(false)''',
input_size=(1, 2, 3),
fullname='interpolate_linear_tuple_1d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4.}))
.mode(torch::kLinear)
.align_corners(false)''',
input_size=(1, 2, 4),
fullname='interpolate_linear_scale_1d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12}))
.scale_factor(c10::nullopt)
.mode(torch::kLinear)
.align_corners(false)''',
input_size=(0, 2, 4),
fullname='interpolate_linear_1d_zero_dim',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=True),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12}))
.scale_factor(c10::nullopt)
.mode(torch::kLinear)
.align_corners(true)''',
input_size=(1, 2, 4),
fullname='interpolate_linear_1d_align_corners',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=True),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4.}))
.mode(torch::kLinear)
.align_corners(true)''',
input_size=(1, 2, 4),
fullname='interpolate_linear_scale_1d_align_corners',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=2, scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({2, 2}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)''',
input_size=(1, 128, 1, 1),
fullname='interpolate_nearest_2d_launch_configs',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_nearest_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(12, 16), scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 16}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)''',
input_size=(1, 2, 3, 4),
fullname='interpolate_nearest_tuple_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4., 4.}))
.mode(torch::kNearest)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_nearest_scale_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)''',
input_size=(0, 2, 4, 4),
fullname='interpolate_nearest_2d_zero_dim',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kBilinear)
.align_corners(false)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bilinear_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kBilinear)
.align_corners(false)''',
input_size=(0, 2, 4, 4),
fullname='interpolate_bilinear_2d_zero_dim',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None,
mode='bilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kBilinear)
.align_corners(false)''',
input_size=(1, 2, 2, 3),
fullname='interpolate_bilinear_tuple_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=4.,
mode='bilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4., 4.}))
.mode(torch::kBilinear)
.align_corners(false)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bilinear_scale_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.),
mode='bilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 2.}))
.mode(torch::kBilinear)
.align_corners(false)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bilinear_scale_tuple_shared_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.),
mode='bilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 1.}))
.mode(torch::kBilinear)
.align_corners(false)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bilinear_scale_tuple_skewed_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bilinear', align_corners=True),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kBilinear)
.align_corners(true)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bilinear_tuple_2d_align_corners',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.),
mode='bilinear', align_corners=True),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 1.}))
.mode(torch::kBilinear)
.align_corners(true)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bilinear_scale_tuple_skewed_2d_align_corners',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bicubic', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kBicubic)
.align_corners(false)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bicubic_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bicubic', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kBicubic)
.align_corners(false)''',
input_size=(0, 2, 4, 4),
fullname='interpolate_bicubic_2d_zero_dim',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None,
mode='bicubic', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kBicubic)
.align_corners(false)''',
input_size=(1, 2, 2, 3),
fullname='interpolate_bicubic_tuple_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='bicubic', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4., 4.}))
.mode(torch::kBicubic)
.align_corners(false)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bicubic_scale_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.),
mode='bicubic', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 2.}))
.mode(torch::kBicubic)
.align_corners(false)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bicubic_scale_tuple_shared_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.),
mode='bicubic', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 1.}))
.mode(torch::kBicubic)
.align_corners(false)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bicubic_scale_tuple_skewed_2d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bicubic', align_corners=True),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kBicubic)
.align_corners(true)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bicubic_tuple_2d_align_corners',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.),
mode='bicubic', align_corners=True),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 1.}))
.mode(torch::kBicubic)
.align_corners(true)''',
input_size=(1, 2, 4, 4),
fullname='interpolate_bicubic_scale_tuple_skewed_2d_align_corners',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)''',
input_size=(1, 2, 4, 4, 4),
fullname='interpolate_nearest_3d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)''',
input_size=(0, 2, 4, 4, 4),
fullname='interpolate_nearest_3d_zero_dim',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(12, 16, 16), scale_factor=None, mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 16, 16}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)''',
input_size=(1, 2, 3, 4, 4),
fullname='interpolate_nearest_tuple_3d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4., 4., 4.}))
.mode(torch::kNearest)''',
input_size=(1, 2, 4, 4, 4),
fullname='interpolate_nearest_scale_3d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='trilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kTrilinear)
.align_corners(false)''',
input_size=(1, 2, 4, 4, 4),
fullname='interpolate_trilinear_3d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='trilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kTrilinear)
.align_corners(false)''',
input_size=(0, 2, 4, 4, 4),
fullname='interpolate_trilinear_3d_zero_dim',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(4, 6, 6),
scale_factor=None, mode='trilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kTrilinear)
.align_corners(false)''',
input_size=(1, 2, 2, 3, 3),
fullname='interpolate_trilinear_tuple_3d',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=False),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({3., 3., 3.}))
.mode(torch::kTrilinear)
.align_corners(false)''',
input_size=(1, 2, 3, 4, 5),
fullname='interpolate_trilinear_scale_3d',
# See https://github.com/pytorch/pytorch/issues/5006
precision=3e-4,
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=(4, 6, 6), scale_factor=None,
mode='trilinear', align_corners=True),
cpp_options_args='''F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kTrilinear)
.align_corners(true)''',
input_size=(1, 2, 2, 3, 3),
fullname='interpolate_trilinear_tuple_3d_align_corners',
pickle=False,
),
dict(
constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=True),
cpp_options_args='''F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({3., 3., 3.}))
.mode(torch::kTrilinear)
.align_corners(true)''',
input_size=(1, 2, 3, 4, 4),
fullname='interpolate_trilinear_scale_3d_align_corners',
# See https://github.com/pytorch/pytorch/issues/5006
precision=3e-4,
pickle=False,
),
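# Adaptive pooling configurations. The single argument is the target output size: an
# int, a tuple, or a tuple containing None to leave that dimension unchanged. The
# AdaptiveMaxPool entries build inputs with _rand_tensor_non_equal, presumably so all
# values are distinct and the max indices are deterministic.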
dict(
module_name='AdaptiveMaxPool1d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveMaxPool1dOptions(3)',
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5),
),
dict(
module_name='AdaptiveMaxPool1d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveMaxPool1dOptions(3)',
input_fn=lambda: _rand_tensor_non_equal(3, 5),
desc='no_batch_dim',
),
dict(
module_name='AdaptiveMaxPool2d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveMaxPool2dOptions(3)',
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),
desc='single',
),
dict(
module_name='AdaptiveMaxPool2d',
constructor_args=((3, 4),),
cpp_constructor_args='torch::nn::AdaptiveMaxPool2dOptions({3, 4})',
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),
desc='tuple',
),
dict(
module_name='AdaptiveMaxPool2d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveMaxPool2dOptions(3)',
input_fn=lambda: _rand_tensor_non_equal(3, 5, 6),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='AdaptiveMaxPool2d',
constructor_args=((3, None),),
cpp_constructor_args='torch::nn::AdaptiveMaxPool2dOptions({3, c10::nullopt})',
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),
desc='tuple_none',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveMaxPool3dOptions(3)',
input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),
desc='single',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveMaxPool3dOptions(3)',
input_fn=lambda: _rand_tensor_non_equal(3, 5, 6, 7),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=((3, 4, 5),),
cpp_constructor_args='torch::nn::AdaptiveMaxPool3dOptions({3, 4, 5})',
input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),
desc='tuple',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=((3, None, 5),),
cpp_constructor_args='torch::nn::AdaptiveMaxPool3dOptions({3, c10::nullopt, 5})',
input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),
desc='tuple_none',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveMaxPool3dOptions(3)',
input_fn=lambda: _rand_tensor_non_equal(2, 3, 12, 9, 3),
desc='single_nonatomic',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=((3, 4, 5),),
cpp_constructor_args='torch::nn::AdaptiveMaxPool3dOptions({3, 4, 5})',
input_fn=lambda: _rand_tensor_non_equal(2, 3, 6, 4, 10),
desc='tuple_nonatomic',
),
dict(
module_name='AdaptiveAvgPool1d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveAvgPool1dOptions(3)',
input_fn=lambda: torch.rand(1, 3, 5),
),
dict(
module_name='AdaptiveAvgPool1d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveAvgPool1dOptions(3)',
input_fn=lambda: torch.rand(3, 5),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='AdaptiveAvgPool1d',
constructor_args=(1,),
cpp_constructor_args='torch::nn::AdaptiveAvgPool1dOptions(1)',
input_fn=lambda: torch.rand(1, 3, 5),
desc='one_output',
),
dict(
module_name='AdaptiveAvgPool2d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveAvgPool2dOptions(3)',
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc='single',
),
dict(
module_name='AdaptiveAvgPool2d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveAvgPool2dOptions(3)',
input_fn=lambda: torch.rand(3, 5, 6),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='AdaptiveAvgPool2d',
constructor_args=(1,),
cpp_constructor_args='torch::nn::AdaptiveAvgPool2dOptions(1)',
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc='single_1x1output',
),
dict(
module_name='AdaptiveAvgPool2d',
constructor_args=((3, 4),),
cpp_constructor_args='torch::nn::AdaptiveAvgPool2dOptions({3, 4})',
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc='tuple',
),
dict(
module_name='AdaptiveAvgPool2d',
constructor_args=((3, None),),
cpp_constructor_args='torch::nn::AdaptiveAvgPool2dOptions({3, c10::nullopt})',
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc='tuple_none',
),
dict(
module_name='AdaptiveAvgPool3d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveAvgPool3dOptions(3)',
input_fn=lambda: torch.rand(2, 3, 5, 2, 7),
desc='single',
),
dict(
module_name='AdaptiveAvgPool3d',
constructor_args=(3,),
cpp_constructor_args='torch::nn::AdaptiveAvgPool3dOptions(3)',
input_fn=lambda: torch.rand(3, 5, 2, 7),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='AdaptiveAvgPool3d',
constructor_args=((3, 4, 5),),
cpp_constructor_args='torch::nn::AdaptiveAvgPool3dOptions({3, 4, 5})',
input_fn=lambda: torch.rand(2, 3, 5, 3, 7),
desc='tuple',
),
dict(
module_name='AdaptiveAvgPool3d',
constructor_args=((None, 4, 5),),
cpp_constructor_args='torch::nn::AdaptiveAvgPool3dOptions({c10::nullopt, 4, 5})',
input_fn=lambda: torch.rand(2, 3, 5, 3, 7),
desc='tuple_none',
),
dict(
module_name='AdaptiveAvgPool3d',
constructor_args=((3, 2, 2),),
cpp_constructor_args='torch::nn::AdaptiveAvgPool3dOptions({3, 2, 2})',
input_fn=lambda: torch.rand(1, 1, 3, 2, 6),
desc='last_dim',
),
dict(
module_name='SELU',
input_size=(3, 2, 5),
check_inplace=True
),
dict(
module_name='SELU',
input_size=(),
check_inplace=True,
desc='scalar'
),
dict(
module_name='CELU',
input_size=(3, 2, 5),
constructor_args=(2.,),
cpp_constructor_args='torch::nn::CELUOptions().alpha(2.)',
check_inplace=True,
reference_fn=lambda x, *_: torch.where(x >= 0, x, 2. * ((.5 * x).exp() - 1)),
),
dict(
module_name='CELU',
input_size=(),
constructor_args=(2.,),
cpp_constructor_args='torch::nn::CELUOptions().alpha(2.)',
check_inplace=True,
reference_fn=lambda x, *_: torch.where(x >= 0, x, 2. * ((.5 * x).exp() - 1)),
desc='scalar'
),
dict(
module_name='GLU',
input_size=(5, 6),
),
dict(
module_name='GLU',
constructor_args=(1,),
cpp_constructor_args='torch::nn::GLUOptions(1)',
input_size=(5, 6, 7),
desc='dim',
),
dict(
module_name='GELU',
constructor_args=('none',),
cpp_constructor_args='torch::nn::GELUOptions().approximate(\"none\")',
input_size=(),
desc='scalar',
reference_fn=lambda x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))),
),
dict(
module_name='GELU',
constructor_args=('none',),
cpp_constructor_args='torch::nn::GELUOptions().approximate(\"none\")',
input_size=(3, 2, 5),
reference_fn=lambda x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))),
),
dict(
module_name='SiLU',
input_size=(),
desc='scalar',
reference_fn=lambda x, *_: x * torch.sigmoid(x),
),
dict(
module_name='SiLU',
input_size=(5, 6, 7),
reference_fn=lambda x, *_: x * torch.sigmoid(x),
),
dict(
module_name='Mish',
input_size=(),
desc='scalar',
reference_fn=lambda x, *_: x * torch.tanh(F.softplus(x)),
),
dict(
module_name='Mish',
input_size=(5, 6, 7),
reference_fn=lambda x, *_: x * torch.tanh(F.softplus(x)),
),
dict(
constructor=wrap_functional(F.softmax, dim=-1),
cpp_options_args='F::SoftmaxFuncOptions(-1)',
input_size=(2, 128), # trigger the last-dim algo in CUDA
fullname='softmax_lastdim',
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64),
cpp_options_args='F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)',
input_size=(2, 128),
fullname='softmax_lastdim_dtype',
pickle=False,
test_cuda=False
),
dict(
constructor=wrap_functional(F.softmax, dim=1),
cpp_options_args='F::SoftmaxFuncOptions(1)',
input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo
fullname='softmax_spatial_special',
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=1),
cpp_options_args='F::SoftmaxFuncOptions(1)',
input_size=(2, 2, 4, 4), # regular spatial algorithm
fullname='softmax_spatial',
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64),
cpp_options_args='F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)',
input_size=(2, 2, 4, 4), # regular spatial algorithm
fullname='softmax_spatial_dtype',
pickle=False,
test_cuda=False
),
dict(
constructor=wrap_functional(F.softmax, dim=0),
cpp_options_args='F::SoftmaxFuncOptions(0)',
input_size=(2, 3, 4, 5),
fullname='softmax_functional_dim0',
test_cuda=False,
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=3),
cpp_options_args='F::SoftmaxFuncOptions(3)',
input_size=(2, 3, 4, 5),
fullname='softmax_functional_dim3',
test_cuda=False,
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=-1),
cpp_options_args='F::SoftmaxFuncOptions(-1)',
input_size=(),
fullname='softmax_functional_scalar',
test_cuda=False,
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=-1),
cpp_options_args='F::LogSoftmaxFuncOptions(-1)',
input_size=(2, 128), # trigger the last-dim algo in CUDA
fullname='log_softmax_lastdim',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=1),
cpp_options_args='F::LogSoftmaxFuncOptions(1)',
input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo
fullname='log_softmax_spatial_special',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=1),
cpp_options_args='F::LogSoftmaxFuncOptions(1)',
input_size=(2, 2, 4, 4), # regular spatial algorithm
fullname='log_softmax_spatial',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=0),
cpp_options_args='F::LogSoftmaxFuncOptions(0)',
input_size=(2, 3, 4, 5),
fullname='log_softmax_dim0',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=3),
cpp_options_args='F::LogSoftmaxFuncOptions(3)',
input_size=(2, 3, 4, 5),
fullname='log_softmax_dim3',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=0),
cpp_options_args='F::LogSoftmaxFuncOptions(0)',
input_size=(),
fullname='log_softmax_scalar',
pickle=False,
),
dict(
module_name='Softmax2d',
input_size=(3, 4, 5),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='Softmax',
constructor_args=(-1,),
cpp_constructor_args='torch::nn::SoftmaxOptions(-1)',
input_size=(4, 5),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='LogSoftmax',
constructor_args=(-1,),
        cpp_constructor_args='torch::nn::LogSoftmaxOptions(-1)',
input_size=(4, 5),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
fullname='Unfold',
constructor=lambda: nn.Unfold((2, 2), (1, 1), (0, 0), (1, 1)),
cpp_constructor_args='torch::nn::UnfoldOptions({2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})',
input_size=(2, 4, 3, 3),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname='Fold',
constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)),
cpp_constructor_args='torch::nn::FoldOptions({3, 3}, {2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})',
input_size=(2, 16, 4),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname='Fold_no_batch_dim_input',
constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)),
cpp_constructor_args='torch::nn::FoldOptions({3, 3}, {2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})',
input_size=(16, 4),
check_gradgrad=False,
ref=single_batch_reference_fn,
test_cuda=True,
),
dict(
fullname='Unfold_int_input',
constructor=lambda: nn.Unfold(2, 1, 0, 1),
cpp_constructor_args='torch::nn::UnfoldOptions(2).dilation(1).padding(0).stride(1)',
input_size=(2, 4, 3, 3),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname='Fold_int_input',
constructor=lambda: nn.Fold(3, 2, 1, 0, 1),
cpp_constructor_args='torch::nn::FoldOptions(3, 2).dilation(1).padding(0).stride(1)',
input_size=(2, 16, 4),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname='Fold_no_batch_dim_int_input',
constructor=lambda: nn.Fold(3, 2, 1, 0, 1),
cpp_constructor_args='torch::nn::FoldOptions(3, 2).dilation(1).padding(0).stride(1)',
input_size=(16, 4),
ref=single_batch_reference_fn,
check_gradgrad=False,
test_cuda=True,
),
dict(
module_name='Threshold',
constructor_args=(2., 1.),
cpp_constructor_args='torch::nn::ThresholdOptions(2., 1.)',
input_size=(),
check_inplace=True,
desc='threshold_value_scalar'
),
dict(
module_name='ReLU',
input_size=(),
check_inplace=True,
desc='scalar'
),
dict(
module_name='ReLU6',
input_size=(),
check_inplace=True,
desc='scalar'
),
dict(
module_name='RReLU',
constructor_args=(0.1, 0.9),
cpp_constructor_args='torch::nn::RReLUOptions().lower(0.1).upper(0.9)',
input_size=(),
desc='with_up_down_scalar',
test_cuda=False,
),
dict(
module_name='Hardtanh',
input_size=(),
reference_fn=lambda i, *_: i.clamp(-1, 1),
desc='scalar'
),
dict(
module_name='Sigmoid',
input_size=(),
desc='scalar',
),
dict(
module_name='Tanh',
input_size=(),
desc='scalar',
),
dict(
module_name='Softmax',
constructor_args=(0,),
cpp_constructor_args='torch::nn::SoftmaxOptions(0)',
input_size=(),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(0, True)),
desc='scalar',
),
dict(
module_name='LogSoftmax',
constructor_args=(0,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(0)',
input_size=(),
reference_fn=lambda i, *_: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(),
desc='multiparam_scalar',
),
dict(
module_name='ELU',
constructor_args=(2.,),
cpp_constructor_args='torch::nn::ELUOptions().alpha(2.)',
input_size=(),
desc='scalar',
),
dict(
module_name='Hardshrink',
constructor_args=(2.,),
cpp_constructor_args='torch::nn::HardshrinkOptions(2.)',
input_size=(),
desc='scalar',
),
dict(
module_name='LeakyReLU',
constructor_args=(0.5,),
cpp_constructor_args='torch::nn::LeakyReLUOptions().negative_slope(0.5)',
input_size=(),
check_inplace=True,
desc='with_negval_scalar'
),
dict(
module_name='LogSigmoid',
input_size=(),
reference_fn=lambda i, *_: i.sigmoid().log(),
desc='scalar'
),
dict(
module_name='Softplus',
constructor_args=(2, -100),
cpp_constructor_args='torch::nn::SoftplusOptions().beta(2).threshold(-100)',
input_size=(),
reference_fn=(
lambda i, *_: ((i * 2) > -100).type_as(i) * i
+ ((i * 2) <= -100).type_as(i) * 1.0 / 2.0 * torch.log(1 + torch.exp(2 * i))
),
desc='beta_threshold_scalar',
),
dict(
module_name='Softshrink',
constructor_args=(1,),
cpp_constructor_args='torch::nn::SoftshrinkOptions(1)',
input_size=(),
desc='lambda_scalar',
),
dict(
module_name='PReLU',
input_size=(),
reference_fn=lambda i, p, _: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
desc='scalar',
),
dict(
module_name='Softsign',
input_size=(),
reference_fn=lambda i, *_: i.div(1 + torch.abs(i)),
desc='scalar',
),
dict(
module_name='Softmin',
constructor_args=(0,),
cpp_constructor_args='torch::nn::SoftminOptions(0)',
input_size=(),
desc='scalar',
),
dict(
module_name='Softmin',
constructor_args=(-1,),
cpp_constructor_args='torch::nn::SoftminOptions(-1)',
input_size=(3, 4, 10),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='Tanhshrink',
input_size=(),
desc='scalar',
),
dict(
fullname='Padding12_1dcircular',
constructor=wrap_functional(F.pad, pad=(1, 2), mode='circular'),
cpp_options_args='F::PadFuncOptions({1, 2}).mode(torch::kCircular)',
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 2, 3]),
reference_fn=lambda i, *_: padding1d_circular(i, (1, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname='Padding31_1dcircular',
constructor=wrap_functional(F.pad, pad=(3, 1), mode='circular'),
cpp_options_args='F::PadFuncOptions({3, 1}).mode(torch::kCircular)',
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 2, 3]),
reference_fn=lambda i, *_: padding1d_circular(i, (3, 1)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname='Padding33_1dcircular',
constructor=wrap_functional(F.pad, pad=(3, 3), mode='circular'),
cpp_options_args='F::PadFuncOptions({3, 3}).mode(torch::kCircular)',
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 2, 3]),
reference_fn=lambda i, *_: padding1d_circular(i, (3, 3)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname='Padding1221_2dcircular',
constructor=wrap_functional(F.pad, pad=(1, 2, 2, 1), mode='circular'),
cpp_options_args='F::PadFuncOptions({1, 2, 2, 1}).mode(torch::kCircular)',
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 1, 2, 3]),
reference_fn=lambda i, *_: padding2d_circular(i, (1, 2, 2, 1)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname='Padding2322_2dcircular',
constructor=wrap_functional(F.pad, pad=(2, 3, 2, 2), mode='circular'),
cpp_options_args='F::PadFuncOptions({2, 3, 2, 2}).mode(torch::kCircular)',
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 1, 2, 3]),
reference_fn=lambda i, *_: padding2d_circular(i, (2, 3, 2, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname='Padding3331_2dcircular',
constructor=wrap_functional(F.pad, pad=(3, 3, 3, 1), mode='circular'),
cpp_options_args='F::PadFuncOptions({3, 3, 3, 1}).mode(torch::kCircular)',
input_fn=lambda: torch.arange(9, out=torch.DoubleTensor()).reshape([1, 1, 3, 3]),
reference_fn=lambda i, *_: padding2d_circular(i, (3, 3, 3, 1)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname='Padding122112_3dcircular',
constructor=wrap_functional(F.pad, pad=(1, 2, 2, 1, 1, 2), mode='circular'),
cpp_options_args='F::PadFuncOptions({1, 2, 2, 1, 1, 2}).mode(torch::kCircular)',
input_fn=lambda: torch.arange(12, out=torch.DoubleTensor()).reshape([1, 1, 2, 2, 3]),
reference_fn=lambda i, *_: padding3d_circular(i, (1, 2, 2, 1, 1, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname='Padding322112_3dcircular',
constructor=wrap_functional(F.pad, pad=(3, 2, 2, 1, 1, 2), mode='circular'),
cpp_options_args='F::PadFuncOptions({3, 2, 2, 1, 1, 2}).mode(torch::kCircular)',
input_fn=lambda: torch.arange(12, out=torch.DoubleTensor()).reshape([1, 1, 2, 2, 3]),
reference_fn=lambda i, *_: padding3d_circular(i, (3, 2, 2, 1, 1, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname='Padding332122_3dcircular',
constructor=wrap_functional(F.pad, pad=(3, 3, 2, 1, 2, 2), mode='circular'),
cpp_options_args='F::PadFuncOptions({3, 3, 2, 1, 2, 2}).mode(torch::kCircular)',
input_fn=lambda: torch.arange(12, out=torch.DoubleTensor()).reshape([1, 1, 2, 2, 3]),
reference_fn=lambda i, *_: padding3d_circular(i, (3, 3, 2, 1, 2, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
module_name='PairwiseDistance',
input_fn=lambda: (torch.randn(10, 8), torch.randn(10, 8)),
),
dict(
module_name='PairwiseDistance',
input_fn=lambda: (torch.randn(10, 1), torch.randn(10, 8)),
desc='broadcast_lhs'
),
dict(
module_name='PairwiseDistance',
input_fn=lambda: (torch.randn(10, 8), torch.randn(1, 8)),
desc='broadcast_rhs'
),
dict(
module_name='PairwiseDistance',
constructor_args=(1.5, 1e-05, True),
cpp_constructor_args='torch::nn::PairwiseDistanceOptions().p(1.5).eps(1e-05).keepdim(true)',
input_fn=lambda: (torch.randn(10, 8), torch.randn(10, 8)),
desc='with_non_default_args',
),
dict(
module_name='PairwiseDistance',
input_fn=lambda: (torch.randn(8), torch.randn(8)),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
),
dict(
module_name='TransformerEncoderLayer',
constructor_args=(4, 2, 16, 0.0),
cpp_constructor_args='''torch::nn::TransformerEncoderLayerOptions(4, 2)
.dim_feedforward(16)
.dropout(0.0)''',
input_size=(2, 3, 4),
desc='relu_activation',
with_tf32=True,
tf32_precision=0.1,
# TODO(#50743): figure out the error
# RuntimeError: The size of tensor a (6) must match the size of tensor b (4)
# at non-singleton dimension 2
check_batched_grad=False,
),
dict(
module_name='TransformerEncoderLayer',
constructor_args=(4, 2, 8, 0.0, F.gelu),
cpp_constructor_args='''torch::nn::TransformerEncoderLayerOptions(4, 2)
.dim_feedforward(8)
.dropout(0.0)
.activation(torch::kGELU)''',
input_size=(2, 3, 4),
check_gradgrad=False,
desc='gelu_activation',
with_tf32=True,
tf32_precision=0.05,
),
dict(
module_name='TransformerDecoderLayer',
constructor_args=(4, 2, 8, 0.0),
cpp_constructor_args='''torch::nn::TransformerDecoderLayerOptions(4, 2)
.dim_feedforward(8)
.dropout(0.0)''',
input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4)),
check_gradgrad=False,
desc='relu_activation',
with_tf32=True,
tf32_precision=0.05,
),
dict(
module_name='TransformerDecoderLayer',
constructor_args=(4, 2, 8, 0.0, F.gelu),
cpp_constructor_args='''torch::nn::TransformerDecoderLayerOptions(4, 2)
.dim_feedforward(8)
.dropout(0.0)
.activation(torch::kGELU)''',
input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4)),
check_gradgrad=False,
desc='gelu_activation',
with_tf32=True,
tf32_precision=0.05,
),
dict(
module_name='Transformer',
constructor_args=(4, 2, 2, 2, 8, 0.0, F.relu),
cpp_constructor_args='''torch::nn::TransformerOptions()
.d_model(4)
.nhead(2)
.num_encoder_layers(2)
.num_decoder_layers(2)
.dim_feedforward(8)
.dropout(0.0)
.activation(torch::kReLU)''',
        input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4), torch.rand(3, 3)),
check_gradgrad=False,
desc='multilayer_coder',
with_tf32=True,
tf32_precision=0.02,
),
dict(
module_name='Linear',
constructor_args=(3, 5),
cpp_constructor_args='torch::nn::LinearOptions(3, 5)',
input_fn=lambda: torch.rand(3),
reference_fn=lambda i, p, _: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1],
desc="no_batch_dim",
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Flatten',
cpp_constructor_args='torch::nn::FlattenOptions().start_dim(-3).end_dim(-1)',
constructor_args=(-3, -1),
input_size=(3, 4, 5),
reference_fn=single_batch_reference_fn,
desc="no_batch_dim",
),
dict(
module_name='Unflatten',
cpp_constructor_args='torch::nn::UnflattenOptions(-2, {2, 2})',
constructor_args=(-2, torch.Size([2, 2])),
input_size=(3, 4, 5),
reference_fn=single_batch_reference_fn,
desc="no_batch_dim",
),
]
# add conv padding mode tests:
for padding_mode, cpp_padding_mode in zip(
['reflect', 'circular', 'replicate', 'zeros'],
['torch::kReflect', 'torch::kCircular', 'torch::kReplicate', 'torch::kZeros']):
# conv signature:
# in_channels, out_channels, kernel_size, stride=1,
# padding=0, dilation=1, groups=1,
# bias=True, padding_mode='zeros'
for d in (1, 2, 3):
if d == 3 and padding_mode == 'reflect':
# FIXME: remove after implementing reflection pad 3d
# https://github.com/pytorch/pytorch/issues/27655
continue
padding = tuple(range(1, d + 1))
cpp_padding = '{' + ', '.join(map(str, padding)) + '}'
input_size = (2, 2) + (4,) * d
        # simplified from `(4 + 2 * p - 3) // 2 + 1`; see the sketch after this loop
        output_size = (2, 3) + tuple(p + 1 for p in padding)
new_module_tests.append(
dict(
module_name='Conv{}d'.format(d),
constructor_args=(2, 3, 3, 2, padding, 1, 1, True, padding_mode),
cpp_constructor_args='''torch::nn::Conv{}dOptions(2, 3, 3)
.stride(2)
.padding({})
.dilation(1)
.groups(1)
.bias(true)
.padding_mode({})'''.format(d, cpp_padding, cpp_padding_mode),
input_size=input_size,
output_size=output_size,
cudnn=True,
desc='{}_stride2_pad2'.format(padding_mode),
with_tf32=True,
tf32_precision=0.05
),
)
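
# A tiny sanity sketch for the `output_size` simplification in the conv padding-mode loop
# above (illustrative only; the test harness never calls this). With input length 4,
# kernel 3, stride 2 and dilation 1, the usual conv length formula
# (4 + 2 * p - 3) // 2 + 1 collapses to (2 * p + 1) // 2 + 1 == p + 1 for integer p >= 0.
def _conv_padding_output_size_sketch():
    for p in range(1, 4):
        assert (4 + 2 * p - 3) // 2 + 1 == p + 1
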
# Check that non-linear activations work with no batch dimension
non_linear_activations_no_batch = [
'ELU', 'Hardshrink', 'Hardsigmoid', 'Hardtanh', 'Hardswish', 'LeakyReLU',
'LogSigmoid', 'PReLU', 'ReLU', 'ReLU6', 'RReLU', 'SELU', 'CELU', 'GELU', 'GLU',
'Sigmoid', 'SiLU', 'Mish', 'Softplus', 'Softshrink', 'Softsign', 'Tanh',
'Tanhshrink', 'Threshold'
]
non_linear_activations_extra_info: Dict[str, dict] = {
'CELU': {'constructor_args': (2.,)},
'Threshold': {'constructor_args': (2., 1.)},
'Hardsigmoid': {'check_gradgrad': False, 'check_jit': False},
'Hardswish': {'check_gradgrad': False, 'check_jit': False},
    # For RReLU, tests that compare CPU and GPU results fail because the RNG
    # state differs between CPU and GPU
'RReLU': {'test_cuda': False},
}
for non_linear_activation in non_linear_activations_no_batch:
activation_test_info = dict(
module_name=non_linear_activation,
input_size=(4,),
reference_fn=single_batch_reference_fn,
desc='no_batch_dim',
test_cpp_api_parity=False,
)
extra_info = non_linear_activations_extra_info.get(non_linear_activation, {})
activation_test_info.update(extra_info)
new_module_tests.append(activation_test_info)
def kldivloss_reference(input, target, reduction='mean'):
result = target * (target.log() - input)
if reduction == 'mean':
return result.mean()
elif reduction == 'sum':
return result.sum()
elif reduction == 'batchmean' and result.dim() != 0:
return result.sum() / result.size(0)
return result
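
# Illustrative sketch (not used by the harness): the reference above expects `input`
# already in log-space, which is why the KLDivLoss test dicts below build inputs with
# `.log()`; under that assumption it matches F.kl_div for the 'sum' reduction.
def _kldivloss_reference_example():
    inp = torch.rand(3, 5).log()
    tgt = torch.rand(3, 5)
    return torch.allclose(kldivloss_reference(inp, tgt, reduction='sum'),
                          F.kl_div(inp, tgt, reduction='sum'))
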
def kldivloss_log_target_reference(input, target, reduction='mean'):
result = torch.exp(target) * (target - input)
if reduction == 'mean':
return result.mean()
elif reduction == 'sum':
return result.sum()
elif reduction == 'batchmean' and result.dim() != 0:
return result.sum() / result.size(0)
return result
def nlllossNd_reference(input, target, weight=None, ignore_index=-100,
reduction='mean'):
assert input.dim() >= 3
N = input.size(0)
C = input.size(1)
out_size = (N,) + input.size()[2:]
output = torch.zeros(out_size).type_as(input)
if weight is None:
weight = torch.ones(C).type_as(input)
total_weight = 0
for tup in product(*[range(size) for size in out_size]):
t_nx = target[tup]
norm = 0. if ignore_index == t_nx else weight[t_nx].item()
input_index = list(tup)
input_index.insert(1, t_nx)
output[tup] = -input[tuple(input_index)] * norm
total_weight += norm
if reduction == 'mean':
return output.sum() / total_weight
elif reduction == 'sum':
return output.sum()
return output
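
# Illustrative sketch (not used by the harness): on a 2d spatial case the N-d NLL
# reference above should agree with F.nll_loss under the default 'mean' reduction.
def _nlllossNd_reference_example():
    inp = F.log_softmax(torch.randn(2, 3, 5, 5), dim=1)
    tgt = torch.randint(0, 3, (2, 5, 5))
    return torch.allclose(nlllossNd_reference(inp, tgt), F.nll_loss(inp, tgt))
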
def cross_entropy_loss_prob_target_reference(input, target, weight=None, reduction='mean',
label_smoothing=0.0):
assert input.dim() >= 2
input = torch.log_softmax(input, 1)
C = input.size(1)
if weight is None:
weight = torch.ones(C).type_as(input)
weight = weight.view(1, C, *(1 for _ in input.shape[2:]))
if label_smoothing > 0.0:
assert label_smoothing <= 1.0
target = (target * (1 - label_smoothing) + label_smoothing / C)
output = -(input * target * weight).sum(dim=1)
if reduction == 'mean':
return output.mean()
elif reduction == 'sum':
return output.sum()
return output
def cross_entropy_loss_indices_target_reference(input, target, weight=None, ignore_index=-100,
reduction='mean', label_smoothing=0.0):
log_softmax_input = torch.log_softmax(input, 1)
nllloss = F.nll_loss(
log_softmax_input,
target,
weight,
ignore_index=ignore_index,
reduction=reduction)
if label_smoothing == 0.0:
return nllloss
assert 0.0 < label_smoothing <= 1.0
input = torch.log_softmax(input, 1)
C = input.size(1)
if weight is not None:
input = input * weight.view(1, C, *(1 for _ in input.shape[2:]))
smooth_loss = -torch.sum(input, 1)
ignore_mask = target == ignore_index
smooth_loss.masked_fill_(ignore_mask, 0.0)
if reduction == 'mean':
if weight is not None:
            # TODO: This code path can be removed if #61309 is resolved
# loss is normalized by the weights to be consistent with nll_loss_nd
ret = torch.sum(smooth_loss) / weight.gather(0, target.masked_select(ignore_mask.logical_not()).flatten()).sum()
else:
ret = torch.mean(smooth_loss.masked_select(ignore_mask.logical_not()))
elif reduction == 'sum':
ret = torch.sum(smooth_loss)
else:
ret = smooth_loss
return (1 - label_smoothing) * nllloss + ret * (label_smoothing / C)
def cross_entropy_loss_reference(input, target, weight=None, ignore_index=-100, reduction='mean',
label_smoothing=0.0):
if input.shape == target.shape:
return cross_entropy_loss_prob_target_reference(
input,
target,
weight=weight,
reduction=reduction,
label_smoothing=label_smoothing)
else:
return cross_entropy_loss_indices_target_reference(
input, target, weight=weight, reduction=reduction,
ignore_index=ignore_index, label_smoothing=label_smoothing
)
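
# Illustrative sketch (not used by the harness): with probability-style targets the
# dispatcher above routes to the prob-target reference, which should line up with
# F.cross_entropy's soft-target path (no weights, no smoothing, default 'mean' reduction).
def _cross_entropy_prob_target_example():
    logits = torch.randn(4, 3)
    probs = torch.rand(4, 3).softmax(dim=1)
    return torch.allclose(cross_entropy_loss_reference(logits, probs),
                          F.cross_entropy(logits, probs))
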
def nllloss_reference(input, target, weight=None, ignore_index=-100,
reduction='mean'):
def nll_loss_helper(input, target, weight, ignore_index):
if target == ignore_index:
return (0, 0)
norm = 1 if weight is None else weight[target]
result = -input[target] * norm
return (result, norm)
losses_and_weights = [nll_loss_helper(i, t, weight, ignore_index)
for i, t in zip(input, target)]
losses, weights = zip(*losses_and_weights)
losses_tensor = input.new_tensor(losses)
if reduction == 'mean':
return sum(losses_tensor) / sum(weights)
elif reduction == 'sum':
return sum(losses_tensor)
else:
return losses_tensor
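
# Illustrative sketch (not used by the harness): for per-sample (1-d target) inputs the
# reference above should agree with F.nll_loss under the default 'mean' reduction.
def _nllloss_reference_example():
    inp = F.log_softmax(torch.randn(6, 4), dim=1)
    tgt = torch.tensor([0, 1, 2, 3, 0, 1])
    return torch.allclose(nllloss_reference(inp, tgt), F.nll_loss(inp, tgt))
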
def smoothl1loss_reference(input, target, reduction='mean', beta=1.0):
abs_diff = (input - target).abs()
ge_beta_mask = (abs_diff >= beta).type_as(abs_diff)
lt_beta_mask = (abs_diff < beta).type_as(abs_diff)
    # when beta == 0 (the degenerate case), this is just l1_loss
if beta == 0:
output = abs_diff
else:
output = ge_beta_mask * (abs_diff - 0.5 * beta) + lt_beta_mask * 0.5 * (abs_diff ** 2) / beta
if reduction == 'mean':
return output.mean()
elif reduction == 'sum':
return output.sum()
return output
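
# Illustrative sketch (not used by the harness): as the comment in the body notes, with
# beta == 0 the reference above degenerates to a plain L1 loss.
def _smoothl1_beta_zero_example():
    a, b = torch.randn(5), torch.randn(5)
    return torch.allclose(smoothl1loss_reference(a, b, beta=0), F.l1_loss(a, b))
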
def huberloss_reference(input, target, reduction='mean', delta=1.0):
abs_diff = (input - target).abs()
ge_delta_mask = (abs_diff >= delta)
lt_delta_mask = (abs_diff < delta)
output = ge_delta_mask * delta * (abs_diff - 0.5 * delta) + lt_delta_mask * 0.5 * (abs_diff ** 2)
if reduction == 'mean':
return output.mean()
elif reduction == 'sum':
return output.sum()
return output
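
# Illustrative sketch (not used by the harness): pointwise, the Huber loss above is just
# delta times the smooth-L1 loss evaluated with beta == delta, so the reductions agree too.
def _huber_vs_smoothl1_example(delta=2.0):
    a, b = torch.randn(7), torch.randn(7)
    return torch.allclose(huberloss_reference(a, b, delta=delta),
                          delta * smoothl1loss_reference(a, b, beta=delta))
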
def _multilabelmarginloss_reference(input, target):
targets = []
for target_index in target:
if target_index < 0:
break
targets.append(target_index)
    total = 0
    for target_index in targets:
        for i in range(0, len(input)):
            if i not in targets:
                total += max(0, 1 - input[target_index] + input[i])
    return total
def multilabelmarginloss_reference(input, target, reduction='mean'):
# make everything 2-dimensional
input_dim = input.dim()
if input.dim() < 2:
assert target.dim() < 2
input = input.unsqueeze(0) if input.dim() == 1 else input.unsqueeze(0).unsqueeze(0)
target = target.unsqueeze(0) if target.dim() == 1 else target.unsqueeze(0).unsqueeze(0)
n = input.size(0)
dim = input.size(1)
output = input.new(n).zero_()
for i in range(0, n):
output[i] = _multilabelmarginloss_reference(input[i], target[i])
if reduction == 'mean':
return output.mean() / dim
elif reduction == 'sum':
return output.sum() / dim
elif input_dim < 2:
# we know we have (1, C) X (1, C) -> (1,), so squeeze will get us
# back to correct dimensionality
return output.squeeze() / dim
else:
return output / dim
def hingeembeddingloss_reference(input, target, margin=1.0, reduction='mean'):
margin_clamp = (margin - input).clamp(min=0).type_as(input)
output = torch.where(target == 1, input, margin_clamp)
if reduction == 'mean':
return output.mean()
elif reduction == 'sum':
return output.sum()
return output
def softmarginloss_reference(input, target, reduction='mean'):
output = (1 + (-input * target).exp()).log()
if reduction == 'mean':
return output.mean()
elif reduction == 'sum':
return output.sum()
return output
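
# Illustrative sketch (not used by the harness): the closed form above, mean of
# log(1 + exp(-target * input)), is exactly what F.soft_margin_loss computes.
def _softmargin_reference_example():
    inp = torch.randn(4, 5)
    tgt = torch.randn(4, 5).sign()
    return torch.allclose(softmarginloss_reference(inp, tgt), F.soft_margin_loss(inp, tgt))
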
def _multimarginloss_reference(input, target_idx, p, margin, weight):
if weight is None:
weight = input.new(len(input)).fill_(1)
output = 0
for i in range(0, len(input)):
if i != target_idx:
output += max(0, weight[target_idx] * (margin - input[target_idx] + input[i]) ** p)
return output
def multimarginloss_reference(input, target, p=1, margin=1, weight=None, reduction='mean'):
if input.dim() < 2:
input = input.unsqueeze(0) if input.dim() == 1 else input.unsqueeze(0).unsqueeze(0)
target_dim = target.dim()
if target.dim() == 0:
target = target.unsqueeze(0)
n = input.size(0)
dim = input.size(1)
output = input.new(n)
for x in range(0, n):
output[x] = _multimarginloss_reference(input[x], target[x], p, margin, weight)
if reduction == 'mean':
return output.mean() / dim
elif reduction == 'sum':
return output.sum() / dim
elif target_dim == 0:
return output.squeeze(0) / dim
return output / dim
def cosineembeddingloss_reference(input1, input2, target, margin=0, reduction='mean'):
def _cos(a, b):
cos = a.new(a.size(0))
for i in range(0, a.size(0)):
cos[i] = (a[i] * b[i]).sum() / ((((a[i] * a[i]).sum() + 1e-12) * ((b[i] * b[i]).sum() + 1e-12)) ** 0.5)
return cos
output = torch.where(target == 1, 1 - _cos(input1, input2), (_cos(input1, input2) - margin).clamp(min=0))
if reduction == 'mean':
return output.mean()
elif reduction == 'sum':
return output.sum()
return output
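
# Illustrative sketch (not used by the harness): up to the tiny eps inside `_cos`, the
# reference above should match F.cosine_embedding_loss for the default margin.
def _cosineembedding_reference_example():
    x1, x2 = torch.rand(8, 10), torch.rand(8, 10)
    y = torch.randn(8).sign()
    return torch.allclose(cosineembeddingloss_reference(x1, x2, y),
                          F.cosine_embedding_loss(x1, x2, y))
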
def tripletmarginloss_reference(anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False,
reduction='mean'):
d_p = torch.pairwise_distance(anchor, positive, p, eps)
d_n = torch.pairwise_distance(anchor, negative, p, eps)
if swap:
d_s = torch.pairwise_distance(positive, negative, p, eps)
d_n = torch.min(d_n, d_s)
output = torch.clamp(margin + d_p - d_n, min=0.0)
if reduction == 'mean':
return output.mean()
elif reduction == 'sum':
return output.sum()
return output
def marginrankingloss_reference(input1, input2, target, margin=0, reduction='mean'):
output = (-target * (input1 - input2) + margin).clamp(min=0)
if reduction == 'mean':
return output.mean()
elif reduction == 'sum':
return output.sum()
return output
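
# Illustrative sketch (not used by the harness): the formula above is the same hinge used
# by F.margin_ranking_loss, so the two should agree for the default margin and reduction.
def _marginranking_reference_example():
    x1, x2 = torch.randn(10), torch.randn(10)
    y = torch.randn(10).sign()
    return torch.allclose(marginrankingloss_reference(x1, x2, y),
                          F.margin_ranking_loss(x1, x2, y))
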
# This directly follows Graves et al.'s paper; in contrast to the production implementation, it does not work in log-space
def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean'):
input_lengths = torch.as_tensor(input_lengths, dtype=torch.long)
target_lengths = torch.as_tensor(target_lengths, dtype=torch.long)
dt = log_probs.dtype
    log_probs = log_probs.double()  # we need the extra precision since we are not working in log-space
targets = targets.long()
cum_target_lengths = target_lengths.cumsum(0)
losses = []
for i in range(log_probs.size(1)):
input_length = input_lengths[i].item()
target_length = target_lengths[i].item()
cum_target_length = cum_target_lengths[i].item()
targets_prime = targets.new_full((2 * target_length + 1,), blank)
if targets.dim() == 2:
targets_prime[1::2] = targets[i, :target_length]
else:
targets_prime[1::2] = targets[cum_target_length - target_length:cum_target_length]
probs = log_probs[:input_length, i].exp()
alpha = log_probs.new_zeros((target_length * 2 + 1,))
alpha[0] = probs[0, blank]
alpha[1] = probs[0, targets_prime[1]]
mask_third = (targets_prime[:-2] != targets_prime[2:])
for t in range(1, input_length):
alpha_next = alpha.clone()
alpha_next[1:] += alpha[:-1]
alpha_next[2:] += torch.where(mask_third, alpha[:-2], alpha.new_zeros(1))
alpha = probs[t, targets_prime] * alpha_next
losses.append(-alpha[-2:].sum().log()[None])
output = torch.cat(losses, 0)
if reduction == 'mean':
return (output / target_lengths.to(dtype=output.dtype, device=output.device)).mean()
elif reduction == 'sum':
return output.sum()
output = output.to(dt)
return output
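
# Illustrative sketch (not used by the harness): on a small, well-conditioned batch the
# direct-space recursion above should agree with F.ctc_loss (which works in log-space).
def _ctcloss_reference_example():
    T, N, C, S = 12, 2, 5, 4
    log_probs = torch.randn(T, N, C).log_softmax(2)
    targets = torch.randint(1, C, (N, S), dtype=torch.long)
    input_lengths = torch.full((N,), T, dtype=torch.long)
    target_lengths = torch.full((N,), S, dtype=torch.long)
    expected = ctcloss_reference(log_probs, targets, input_lengths, target_lengths)
    actual = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    return torch.allclose(expected.to(actual.dtype), actual, atol=1e-4)
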
def padding1d_circular(input, pad):
r""" input:
[[[0., 1., 2.],
[3., 4., 5.]]]
pad: (1, 2)
output:
[[[2., 0., 1., 2., 0., 1.],
[5., 3., 4., 5., 3., 4.]]]
"""
return torch.cat([input[:, :, -pad[0]:], input,
input[:, :, 0:pad[1]]], dim=2)
def padding2d_circular(input, pad):
r"""input:
[[[[0., 1., 2],
[3., 4., 5.]]]]
pad: (1, 2, 2, 1)
output:
[[[[2., 0., 1., 2., 0., 1.],
[5., 3., 4., 5., 3., 4.],
[2., 0., 1., 2., 0., 1.],
[5., 3., 4., 5., 3., 4.],
[2., 0., 1., 2., 0., 1.]]]]
"""
input = torch.cat([input[:, :, -pad[2]:], input, input[:, :, 0:pad[3]]], dim=2)
return torch.cat([input[:, :, :, -pad[0]:], input, input[:, :, :, 0:pad[1]]], dim=3)
def padding3d_circular(input, pad):
r"""input:
[[[[[ 0., 1., 2.],
[ 3., 4., 5.]],
[[ 6., 7., 8.],
[ 9., 10., 11.]]]]]
pad: (1, 2, 2, 1, 1, 2)
output: [[[[[ 8., 6., 7., 8., 6., 7.],
[11., 9., 10., 11., 9., 10.],
[ 8., 6., 7., 8., 6., 7.],
[11., 9., 10., 11., 9., 10.],
[ 8., 6., 7., 8., 6., 7.]],
[[ 2., 0., 1., 2., 0., 1.],
[ 5., 3., 4., 5., 3., 4.],
[ 2., 0., 1., 2., 0., 1.],
[ 5., 3., 4., 5., 3., 4.],
[ 2., 0., 1., 2., 0., 1.]],
[[ 8., 6., 7., 8., 6., 7.],
[11., 9., 10., 11., 9., 10.],
[ 8., 6., 7., 8., 6., 7.],
[11., 9., 10., 11., 9., 10.],
[ 8., 6., 7., 8., 6., 7.]],
[[ 2., 0., 1., 2., 0., 1.],
[ 5., 3., 4., 5., 3., 4.],
[ 2., 0., 1., 2., 0., 1.],
[ 5., 3., 4., 5., 3., 4.],
[ 2., 0., 1., 2., 0., 1.]],
[[ 8., 6., 7., 8., 6., 7.],
[11., 9., 10., 11., 9., 10.],
[ 8., 6., 7., 8., 6., 7.],
[11., 9., 10., 11., 9., 10.],
[ 8., 6., 7., 8., 6., 7.]]]]]
"""
input = torch.cat([input[:, :, -pad[4]:], input, input[:, :, 0:pad[5]]], dim=2)
input = torch.cat([input[:, :, :, -pad[2]:], input, input[:, :, :, 0:pad[3]]], dim=3)
return torch.cat([input[:, :, :, :, -pad[0]:], input, input[:, :, :, :, 0:pad[1]]], dim=4)
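
# Illustrative sketch (not used by the harness): the circular-padding helpers above are
# hand-rolled equivalents of F.pad(..., mode='circular') for the pad widths used in the
# padding test dicts, e.g. in 1d:
def _padding1d_circular_example():
    x = torch.arange(6.).reshape(1, 2, 3)
    return torch.equal(padding1d_circular(x, (1, 2)), F.pad(x, (1, 2), mode='circular'))
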
loss_reference_fns: Dict[str, Callable] = {
'KLDivLoss': kldivloss_reference,
'KLDivLoss_log_target': kldivloss_log_target_reference,
'NLLLoss': nllloss_reference,
'NLLLossNd': nlllossNd_reference,
'SmoothL1Loss': smoothl1loss_reference,
'HuberLoss': huberloss_reference,
'MultiLabelMarginLoss': multilabelmarginloss_reference,
'HingeEmbeddingLoss': hingeembeddingloss_reference,
'SoftMarginLoss': softmarginloss_reference,
'MultiMarginLoss': multimarginloss_reference,
'CosineEmbeddingLoss': cosineembeddingloss_reference,
'TripletMarginLoss': tripletmarginloss_reference,
'MarginRankingLoss': marginrankingloss_reference,
'CTCLoss': ctcloss_reference,
'CrossEntropyLoss': cross_entropy_loss_reference
}
criterion_tests = [
dict(
module_name='L1Loss',
input_size=(2, 3, 4),
target_fn=lambda: torch.randn((2, 3, 4), requires_grad=True),
reference_fn=lambda i, t, _: 1. / i.numel() *
sum((a - b).abs().sum() for a, b in zip(i, t)),
check_complex=True,
),
dict(
module_name='NLLLoss',
input_fn=lambda: torch.rand(15, 10).log(),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
reference_fn=lambda i, t, m:
nllloss_reference(i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
check_bfloat16=True,
),
dict(
module_name='NLLLoss',
constructor_args=(None, None, 2),
cpp_constructor_args='torch::nn::NLLLossOptions().weight({}).ignore_index(2)',
input_fn=lambda: torch.rand(15, 10).log(),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
reference_fn=lambda i, t, _: nllloss_reference(i, t, ignore_index=2),
desc='ignore_index',
check_bfloat16=True,
),
dict(
module_name='NLLLoss',
constructor_args_fn=lambda: (torch.rand(10),),
cpp_constructor_args='torch::nn::NLLLossOptions().weight(torch::rand(10))',
input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
reference_fn=lambda i, t, m:
nllloss_reference(i, t, weight=get_weight(m)),
desc='weights',
check_bfloat16=True,
),
dict(
module_name='NLLLoss',
constructor_args_fn=lambda: (torch.rand(10), None, 2),
cpp_constructor_args='torch::nn::NLLLossOptions().weight(torch::rand(10)).ignore_index(2)',
input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
reference_fn=lambda i, t, m:
nllloss_reference(i, t, weight=get_weight(m), ignore_index=2),
desc='weights_ignore_index',
check_bfloat16=True,
),
dict(
module_name='NLLLoss',
constructor_args_fn=lambda: (torch.rand(10), None, -1),
cpp_constructor_args='torch::nn::NLLLossOptions().weight(torch::rand(10)).ignore_index(-1)',
input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
target_fn=lambda: torch.empty(15).uniform_().mul(10 + 1).floor().long() - 1,
reference_fn=lambda i, t, m:
nllloss_reference(i, t, weight=get_weight(m), ignore_index=-1),
desc='weights_ignore_index_neg',
check_bfloat16=True,
),
dict(
module_name='KLDivLoss',
input_fn=lambda: torch.rand(10, 10).log(),
target_fn=lambda: torch.rand(10, 10),
reference_fn=lambda i, t, m:
kldivloss_reference(i, t, get_reduction(m)),
check_sum_reduction=True,
),
dict(
module_name='KLDivLoss',
constructor=wraps(nn.KLDivLoss)(partial(nn.KLDivLoss, log_target=True)),
cpp_constructor_args='torch::nn::KLDivLossOptions().log_target(true)',
input_fn=lambda: torch.rand(10, 10).log(),
target_fn=lambda: torch.rand(10, 10).log(),
reference_fn=lambda i, t, m:
kldivloss_log_target_reference(i, t, get_reduction(m)),
check_sum_reduction=True,
desc='log_target',
),
dict(
module_name='MSELoss',
input_size=(2, 3, 4, 5),
target_fn=lambda: torch.randn((2, 3, 4, 5), requires_grad=True),
reference_fn=lambda i, t, m: ((i - t).abs().pow(2).sum() / (i.numel()
if get_reduction(m) == 'mean' else 1)),
check_sum_reduction=True,
),
dict(
module_name='BCELoss',
input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
target_fn=lambda: torch.randn(15, 10).gt(0).double(),
reference_fn=lambda i, t, m: -(t * i.log() + (1 - t) * (1 - i).log()).sum() /
            (i.numel() if get_reduction(m) == 'mean' else 1),
check_bfloat16=True,
),
dict(
module_name='BCELoss',
constructor_args_fn=lambda: (torch.rand(10),),
cpp_constructor_args='torch::nn::BCELossOptions().weight(torch::rand(10))',
input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
target_fn=lambda: torch.randn(15, 10).gt(0).double(),
reference_fn=lambda i, t, m: -((t * i.log() + (1 - t) * (1 - i).log()) * get_weight(m)).sum() /
            (i.numel() if get_reduction(m) == 'mean' else 1),
desc='weights',
check_bfloat16=True,
),
dict(
module_name='CrossEntropyLoss',
input_size=(15, 10),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
),
dict(
module_name='CrossEntropyLoss',
constructor_args_fn=lambda: (torch.rand(10),),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(10))',
input_size=(15, 10),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
desc='weights',
),
dict(
module_name='HingeEmbeddingLoss',
input_size=(10,),
target_fn=lambda: torch.randn(10).gt(0).double().mul_(2).sub(1),
reference_fn=lambda i, t, m:
hingeembeddingloss_reference(i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
),
dict(
module_name='HingeEmbeddingLoss',
constructor_args=(0.5,),
cpp_constructor_args='torch::nn::HingeEmbeddingLossOptions().margin(0.5)',
input_size=(10,),
target_fn=lambda: torch.randn(10).gt(0).double().mul_(2).sub(1),
reference_fn=lambda i, t, m:
hingeembeddingloss_reference(i, t, margin=0.5, reduction=get_reduction(m)),
desc='margin',
check_sum_reduction=True,
),
dict(
module_name='MultiLabelMarginLoss',
input_size=(10,),
target_fn=lambda: torch.rand(10).mul(10).floor().long(),
reference_fn=lambda i, t, m:
multilabelmarginloss_reference(i, t, reduction=get_reduction(m)),
desc="1d",
check_sum_reduction=True,
check_gradgrad=False,
check_bfloat16=True,
),
dict(
module_name='MultiLabelMarginLoss',
input_size=(5, 10),
target_fn=lambda: torch.rand(5, 10).mul(10).floor().long(),
reference_fn=lambda i, t, m:
multilabelmarginloss_reference(i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
check_gradgrad=False,
check_bfloat16=True,
),
dict(
module_name='MultiLabelSoftMarginLoss',
input_size=(5, 10),
target_fn=lambda: torch.rand(5, 10).mul(2).floor(),
reference_fn=lambda i, t, m: -(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()).sum() / i.numel(),
check_gradgrad=False,
),
dict(
module_name='MultiMarginLoss',
input_size=(5, 10),
target_fn=lambda: torch.rand(5).mul(8).floor().long(),
reference_fn=lambda i, t, m:
multimarginloss_reference(i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
check_gradgrad=False,
),
dict(
module_name='MultiMarginLoss',
input_size=(10,),
target_fn=lambda: torch.rand(1).mul(8).floor().long(),
reference_fn=lambda i, t, m:
multimarginloss_reference(i, t, reduction=get_reduction(m)),
desc='1d',
check_sum_reduction=True,
check_gradgrad=False,
),
dict(
module_name='MultiMarginLoss',
constructor_args=(2,),
cpp_constructor_args='torch::nn::MultiMarginLossOptions().p(2)',
input_fn=lambda: torch.rand(5, 10).clamp_(1e-2, 1 - 1e-2),
target_fn=lambda: torch.rand(5).mul(8).floor().long(),
reference_fn=lambda i, t, m:
multimarginloss_reference(i, t, p=2, reduction=get_reduction(m)),
desc='p',
check_sum_reduction=True,
check_gradgrad=False,
),
dict(
module_name='MultiMarginLoss',
constructor_args=(1, 0.5),
cpp_constructor_args='torch::nn::MultiMarginLossOptions().p(1).margin(0.5)',
legacy_constructor_args=(1, None, 0.5),
input_size=(5, 10),
target_fn=lambda: torch.rand(5).mul(8).floor().long(),
reference_fn=lambda i, t, m:
multimarginloss_reference(i, t, margin=0.5, reduction=get_reduction(m)),
desc='margin',
check_sum_reduction=True,
check_gradgrad=False,
),
dict(
module_name='MultiMarginLoss',
constructor_args=(1, 1., torch.rand(10).double()),
cpp_constructor_args='torch::nn::MultiMarginLossOptions().p(1).margin(1.).weight(torch::rand(10))',
legacy_constructor_args=(1, torch.rand(10).double()),
input_size=(5, 10),
target_fn=lambda: torch.rand(5).mul(8).floor().long(),
reference_fn=lambda i, t, m:
multimarginloss_reference(i, t, weight=get_weight(m), reduction=get_reduction(m)),
desc='weights',
check_sum_reduction=True,
check_gradgrad=False,
),
dict(
module_name='SmoothL1Loss',
input_size=(5, 10),
target_fn=lambda: torch.randn((5, 10), requires_grad=True),
check_sum_reduction=True,
reference_fn=lambda i, t, m, b=1.0:
smoothl1loss_reference(i, t, reduction=get_reduction(m), beta=b),
),
dict(
module_name='HuberLoss',
input_size=(5, 10),
target_fn=lambda: torch.randn((5, 10), requires_grad=True),
check_sum_reduction=True,
check_half=True,
check_bfloat16=True,
reference_fn=lambda i, t, m:
huberloss_reference(i, t, reduction=get_reduction(m)),
),
dict(
module_name='SoftMarginLoss',
input_size=(5, 5),
target_fn=lambda: torch.randn(5, 5).sign(),
reference_fn=lambda i, t, m:
softmarginloss_reference(i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
),
dict(
module_name='CosineEmbeddingLoss',
input_fn=lambda: (torch.rand(15, 10), torch.rand(15, 10)),
target_fn=lambda: torch.randn(15).sign(),
reference_fn=lambda i, t, m:
cosineembeddingloss_reference(i[0], i[1], t, reduction=get_reduction(m)),
check_sum_reduction=True,
),
dict(
module_name='CosineEmbeddingLoss',
constructor_args=(0.7,),
cpp_constructor_args='torch::nn::CosineEmbeddingLossOptions().margin(0.7)',
input_fn=lambda: (torch.rand(15, 10), torch.rand(15, 10)),
target_fn=lambda: torch.randn(15).sign(),
reference_fn=lambda i, t, m:
cosineembeddingloss_reference(i[0], i[1], t, margin=0.7, reduction=get_reduction(m)),
desc='margin',
check_sum_reduction=True,
),
dict(
module_name='MarginRankingLoss',
input_fn=lambda: (torch.randn(50).mul(10), torch.randn(50).mul(10)),
target_fn=lambda: torch.randn(50).sign(),
reference_fn=lambda i, t, m:
marginrankingloss_reference(i[0], i[1], t, reduction=get_reduction(m)),
check_sum_reduction=True,
),
dict(
module_name='MarginRankingLoss',
constructor_args=(0.5,),
cpp_constructor_args='torch::nn::MarginRankingLossOptions().margin(0.5)',
input_fn=lambda: (torch.randn(50).mul(10), torch.randn(50).mul(10)),
target_fn=lambda: torch.randn(50).sign(),
reference_fn=lambda i, t, m:
marginrankingloss_reference(i[0], i[1], t, margin=0.5, reduction=get_reduction(m)),
desc='margin',
check_sum_reduction=True,
),
dict(
module_name='BCEWithLogitsLoss',
input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
target_fn=lambda: torch.randn(15, 10).gt(0).double(),
),
dict(
module_name='BCEWithLogitsLoss',
constructor_args=(torch.rand(10),),
cpp_constructor_args='torch::nn::BCEWithLogitsLossOptions().weight(torch::rand(10))',
input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
target_fn=lambda: torch.randn(15, 10).gt(0).double(),
desc='weights',
),
dict(
module_name='BCEWithLogitsLoss',
constructor_args=(torch.rand(()),),
cpp_constructor_args='torch::nn::BCEWithLogitsLossOptions().weight(torch::rand({}))',
input_fn=lambda: torch.rand(()).clamp_(1e-2, 1 - 1e-2),
target_fn=lambda: torch.randn(()).gt(0).double(),
desc='scalar_weights'
),
dict(
module_name='NLLLoss',
input_size=(2, 3, 5, 5),
target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
desc='2d',
check_bfloat16=True,
),
dict(
module_name='NLLLoss',
constructor_args_fn=lambda: (torch.rand(3),),
cpp_constructor_args='torch::nn::NLLLossOptions().weight(torch::rand(3))',
input_size=(2, 3, 5, 5),
target=torch.rand(2, 5, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['NLLLossNd'](i, t, weight=get_weight(m)),
desc='2d_weights',
check_bfloat16=True,
),
dict(
module_name='NLLLoss',
constructor_args=(None, None, 1),
cpp_constructor_args='torch::nn::NLLLossOptions().weight({}).ignore_index(1)',
input_size=(2, 3, 5, 5),
target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['NLLLossNd'](i, t, ignore_index=1),
desc='2d_ignore_index',
check_bfloat16=True,
),
dict(
module_name='NLLLoss',
input_size=(2, 3, 5, 5, 2, 2),
target_fn=lambda: torch.rand(2, 5, 5, 2, 2).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
desc='higher_dim',
check_bfloat16=True,
),
dict(
module_name='NLLLoss',
input_size=(2, 3, 5),
target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
desc='dim_is_3',
check_bfloat16=True,
),
dict(
module_name='CrossEntropyLoss',
input_size=(2, 3, 5, 5),
target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
desc='2d',
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
constructor_args_fn=lambda: (torch.rand(3),),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(3))',
input_size=(2, 3, 5, 5),
target=torch.rand(2, 5, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, weight=get_weight(m)),
desc='2d_weights',
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
constructor_args=(None, None, 1),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight({}).ignore_index(1)',
input_size=(2, 3, 5, 5),
target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, ignore_index=1),
desc='2d_ignore_index',
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
input_size=(2, 3, 5, 5, 2, 2),
target_fn=lambda: torch.rand(2, 5, 5, 2, 2).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
desc='higher_dim',
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
input_size=(2, 3, 5),
target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
desc='dim_is_3',
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
input_size=(5, 3),
target_fn=lambda: torch.rand(5, 3).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
desc='2d_prob_target',
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
input_size=(5, 3, 4),
target_fn=lambda: torch.rand(5, 3, 4).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
desc='3d_prob_target',
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
input_size=(5, 3, 4, 2),
target_fn=lambda: torch.rand(5, 3, 4, 2).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)),
check_sum_reduction=True,
desc='4d_prob_target',
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_2d_prob_target_smoothing_sum_reduction',
constructor=lambda *args, **kwargs: nn.CrossEntropyLoss(reduction='sum',
label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum)',
input_size=(5, 3),
target_fn=lambda: torch.rand(5, 3).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_2d_prob_target_smoothing',
constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15)',
input_size=(5, 3),
target_fn=lambda: torch.rand(5, 3).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_2d_prob_target_smoothing_weight',
constructor_args_fn=lambda: (torch.rand(3).abs(),),
constructor=lambda weight: nn.CrossEntropyLoss(weight, label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).weight(torch::rand(3).abs())',
input_size=(5, 3),
target_fn=lambda: torch.rand(5, 3).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_3d_prob_target_smoothing_sum_reduction',
constructor=lambda *args: nn.CrossEntropyLoss(reduction='sum',
label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum)',
input_size=(5, 3, 4),
target_fn=lambda: torch.rand(5, 3, 4).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_3d_prob_target_smoothing',
constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15)',
input_size=(5, 3, 4),
target_fn=lambda: torch.rand(5, 3, 4).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_3d_indices_target_smoothing',
constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15)',
input_size=(2, 3, 5),
target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_3d_indices_target_smoothing_ignore_index',
constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15, ignore_index=1),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).ignore_index(1)',
input_size=(2, 3, 5),
target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15, ignore_index=1),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_3d_indices_target_smoothing_sum_reduction',
constructor=lambda *args: nn.CrossEntropyLoss(reduction='sum', label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum)',
input_size=(2, 3, 5),
target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_3d_indices_target_smoothing_sum_reduction_ignore_index',
constructor=lambda *args: nn.CrossEntropyLoss(reduction='sum', label_smoothing=0.15,
ignore_index=1),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum).ignore_index(1)',
input_size=(2, 3, 5),
target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15, ignore_index=1),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_2d_indices_target_smoothing',
constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15)',
input_size=(15, 10),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_2d_indices_target_smoothing_sum_reduction',
constructor=lambda *args: nn.CrossEntropyLoss(reduction='sum', label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum)',
input_size=(15, 10),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_2d_indices_target_smoothing_ignore_index',
constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15, ignore_index=3),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).ignore_index(3)',
input_size=(15, 10),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15, ignore_index=3),
check_bfloat16=False,
),
dict(
fullname='CrossEntropyLoss_2d_indices_target_smoothing_weight',
constructor_args_fn=lambda: (torch.rand(10).abs(),),
constructor=lambda weight: nn.CrossEntropyLoss(weight, label_smoothing=0.15),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).weight(torch::rand(10).abs())',
input_size=(15, 10),
target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m), label_smoothing=0.15),
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
constructor_args_fn=lambda: (torch.rand(3),),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(3))',
input_size=(5, 3),
target_fn=lambda: torch.rand(5, 3).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m)),
check_sum_reduction=True,
desc='2d_prob_target_weights',
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
constructor_args_fn=lambda: (torch.rand(3),),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(3))',
input_size=(5, 3, 4),
target_fn=lambda: torch.rand(5, 3, 4).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m)),
check_sum_reduction=True,
desc='3d_prob_target_weights',
check_bfloat16=False,
),
dict(
module_name='CrossEntropyLoss',
constructor_args_fn=lambda: (torch.rand(3),),
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(3))',
input_size=(5, 3, 4, 2),
target_fn=lambda: torch.rand(5, 3, 4, 2).softmax(dim=1),
reference_fn=lambda i, t, m:
loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m)),
check_sum_reduction=True,
desc='4d_prob_target_weights',
check_bfloat16=False,
),
dict(
module_name='PoissonNLLLoss', # Default is log_input=True, full=False
input_size=(2, 3, 4, 5),
target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
reference_fn=lambda i, t, _: (i.exp() - t.mul(i)).mean(),
desc='no_full_loss',
),
dict(
module_name='PoissonNLLLoss',
constructor_args=(False, False), # log_input=False, full=False
cpp_constructor_args='torch::nn::PoissonNLLLossOptions().log_input(false).full(false)',
input_fn=lambda: torch.randn(2, 3, 4, 5).abs_().add_(0.001),
target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
reference_fn=lambda i, t, _: (i - t.mul((i + 1e-8).log())).mean(),
desc='no_full_loss_no_log_input',
),
dict(
module_name='PoissonNLLLoss',
constructor_args=(True, True), # log_input=True, full=True
cpp_constructor_args='torch::nn::PoissonNLLLossOptions().log_input(true).full(true)',
input_size=(2, 3, 4, 5),
target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
reference_fn=lambda i, t, _:
(i.exp() - t.mul(i) + (t.mul(t.log()) - t + 0.5 * (2. * pi * t).log()).masked_fill(t <= 1, 0)).mean(),
desc='full_loss',
),
dict(
module_name='PoissonNLLLoss',
constructor_args=(False, True), # log_input=False, full=True
cpp_constructor_args='torch::nn::PoissonNLLLossOptions().log_input(false).full(true)',
input_fn=lambda: torch.randn(2, 3, 4, 5).abs_().add_(0.001),
target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
reference_fn=lambda i, t, _: (
i - t.mul((i + 1e-8).log()) + (t.mul(t.log()) - t + 0.5 * (2. * pi * t).log()).masked_fill(t <= 1, 0)
).mean(),
desc='full_loss_no_log_input',
),
dict(
module_name='L1Loss',
input_size=(),
target_fn=lambda: torch.randn((), requires_grad=True),
reference_fn=lambda i, t, _: 1. / i.numel() * (i - t).abs().sum(),
desc='scalar',
check_complex=True,
),
dict(
module_name='KLDivLoss',
input_fn=lambda: torch.rand(()).log(),
target_fn=lambda: torch.rand(()),
reference_fn=lambda i, t, m:
kldivloss_reference(i, t, get_reduction(m)),
check_sum_reduction=True,
desc='scalar',
),
dict(
module_name='KLDivLoss',
constructor=wraps(nn.KLDivLoss)(partial(nn.KLDivLoss, log_target=True)),
cpp_constructor_args='torch::nn::KLDivLossOptions().log_target(true)',
input_fn=lambda: torch.rand(()).log(),
target_fn=lambda: torch.rand(()).log(),
reference_fn=lambda i, t, m:
kldivloss_log_target_reference(i, t, get_reduction(m)),
check_sum_reduction=True,
desc='scalar_log_target',
),
dict(
module_name='MSELoss',
input_size=(),
target_fn=lambda: torch.randn((), requires_grad=True),
reference_fn=lambda i, t, m: ((i - t).abs().pow(2).sum() /
(i.numel() if get_reduction(m) == 'mean' else 1)),
check_sum_reduction=True,
desc='scalar',
check_bfloat16=True,
),
dict(
module_name='MSELoss',
input_fn=lambda: torch.ones(5, 68, 64, 64, dtype=torch.float) / 10,
target_fn=lambda: torch.zeros(5, 68, 64, 64, dtype=torch.float),
reference_fn=lambda i, t, m: ((i - t).abs().pow(2).sum() /
(i.numel() if get_reduction(m) == 'mean' else 1)),
check_forward_only=True,
desc='prec',
check_bfloat16=True,
),
dict(
module_name='BCELoss',
constructor_args_fn=lambda: (torch.rand(()),),
cpp_constructor_args='torch::nn::BCELossOptions().weight(torch::rand({}))',
input_fn=lambda: torch.rand(()).clamp_(1e-2, 1 - 1e-2),
target_fn=lambda: torch.rand(()).gt(0).double(),
reference_fn=lambda i, t, m: -((t * i.log() + (1 - t) * (1 - i).log()) * get_weight(m)).sum() /
(i.numel() if get_reduction(m) == 'mean' else 1),
desc='scalar_weights',
check_bfloat16=True,
),
dict(
module_name='HingeEmbeddingLoss',
constructor_args=(0.5,),
cpp_constructor_args='torch::nn::HingeEmbeddingLossOptions().margin(0.5)',
input_size=(),
target_fn=lambda: torch.randn(()).gt(0).double().mul_(2).sub(1),
desc='scalar_margin',
check_sum_reduction=True,
),
dict(
module_name='SmoothL1Loss',
input_size=(),
target_fn=lambda: torch.randn((), requires_grad=True),
check_sum_reduction=True,
reference_fn=lambda i, t, m, b=1.0:
smoothl1loss_reference(i, t, reduction=get_reduction(m), beta=b),
desc='scalar',
),
dict(
module_name='MultiLabelSoftMarginLoss',
constructor_args=(torch.rand(10),),
cpp_constructor_args='torch::nn::MultiLabelSoftMarginLossOptions().weight(torch::rand(10))',
input_fn=lambda: torch.randn(5, 10),
target_fn=lambda: torch.rand(5, 10).mul(2).floor(),
reference_fn=lambda i, t, m: -((t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * get_weight(m)).sum() /
(i.numel() if get_reduction(m) == 'mean' else i.size(1) if get_reduction(m) == 'sum' else 1),
desc='weights',
check_sum_reduction=True,
check_gradgrad=False,
),
dict(
module_name='CTCLoss',
constructor_args=(14,), # blank=14
extra_args=([50, 50, 50], [30, 25, 20]), # input_lengths, target_lengths
input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long),
reference_fn=lambda i, t, il, tl, m:
ctcloss_reference(i, t, il, tl, blank=14, reduction=get_reduction(m)),
desc='lengths_intlists',
check_forward_only=True,
check_sum_reduction=True,
check_gradgrad=False,
check_half=False,
        # `CTCLoss` in the C++ frontend doesn't accept integer lists for `input_lengths` or `target_lengths`
test_cpp_api_parity=False,
check_jit=False,
),
dict(
module_name='CTCLoss',
constructor_args=(14,), # blank=14
cpp_constructor_args='torch::nn::CTCLossOptions().blank(14)',
extra_args=(torch.tensor([50, 50, 50]), torch.tensor([30, 25, 20])), # input_lengths, target_lengths
input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long),
reference_fn=lambda i, t, il, tl, m:
ctcloss_reference(i, t, il, tl, blank=14, reduction=get_reduction(m)),
desc='lengths_tensors',
check_forward_only=True,
check_sum_reduction=True,
check_gradgrad=False,
check_half=False,
),
# Test is flaky
# See https://github.com/pytorch/pytorch/issues/29380.
# dict(
# module_name='CTCLoss',
# desc='1d_target',
# constructor_args=(14,), # blank=14
# extra_args=([50, 50, 50], [30, 25, 20]), # input_lengths, target_lengths
# input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
# target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long),
# reference_fn=lambda i, t, il, tl, m:
# ctcloss_reference(i, t, il, tl, blank=14, reduction=get_reduction(m)),
# check_sum_reduction=True,
# check_gradgrad=False,
# check_half=False,
# ),
dict(
module_name='CTCLoss',
desc='2d_int_target_lengths_intlists',
constructor_args=(0,), # blank=0
extra_args=([50, 50, 50], [30, 25, 20]), # input_lengths, target_lengths
input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int),
reference_fn=lambda i, t, il, tl, m:
ctcloss_reference(i, t, il, tl, blank=0, reduction=get_reduction(m)),
check_forward_only=True,
check_sum_reduction=True,
check_gradgrad=False,
check_half=False,
        # `CTCLoss` in the C++ frontend doesn't accept integer lists for `input_lengths` or `target_lengths`
test_cpp_api_parity=False,
check_jit=False,
),
dict(
module_name='CTCLoss',
desc='2d_int_target_lengths_tensors',
constructor_args=(0,), # blank=0
cpp_constructor_args='torch::nn::CTCLossOptions().blank(0)',
extra_args=(torch.tensor([50, 50, 50]), torch.tensor([30, 25, 20])), # input_lengths, target_lengths
input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int),
reference_fn=lambda i, t, il, tl, m:
ctcloss_reference(i, t, il, tl, blank=0, reduction=get_reduction(m)),
check_forward_only=True,
check_sum_reduction=True,
check_gradgrad=False,
check_half=False,
),
dict(
module_name='CTCLoss',
desc='2d_lengths_tensors',
constructor_args=(0,), # blank=0
cpp_constructor_args='torch::nn::CTCLossOptions().blank(0)',
extra_args=(torch.tensor([50, 50, 50]), torch.tensor([30, 25, 20])), # input_lengths, target_lengths
input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int),
reference_fn=lambda i, t, il, tl, m:
ctcloss_reference(i, t, il, tl, blank=0, reduction=get_reduction(m)),
check_forward_only=True,
check_sum_reduction=True,
check_gradgrad=False,
check_half=False,
),
]
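# Illustrative sketch (not part of the generated test suite): what the
# `label_smoothing=0.15` entries above exercise, checked against a manual
# smoothed-target cross entropy. `_demo_label_smoothing_equivalence` is a
# hypothetical helper name used only for this example.
def _demo_label_smoothing_equivalence():
    eps, num_classes = 0.15, 10
    logits = torch.randn(15, num_classes)
    target = torch.empty(15).uniform_().mul(num_classes).floor().long()
    builtin = nn.CrossEntropyLoss(label_smoothing=eps)(logits, target)
    # Manual computation: mix the one-hot target with a uniform distribution.
    log_probs = F.log_softmax(logits, dim=1)
    one_hot = F.one_hot(target, num_classes).float()
    smoothed = one_hot * (1 - eps) + eps / num_classes
    manual = -(smoothed * log_probs).sum(dim=1).mean()
    assert torch.allclose(builtin, manual, atol=1e-6)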
def single_batch_reference_criterion_fn(*args):
"""Reference function for criterion supporting no batch dimensions.
The criterion is passed the input and target in batched form with a single item.
The output is squeezed to compare with the no-batch input.
"""
criterion = args[-1]
def unsqueeze_inp(inp):
if isinstance(inp, (list, tuple)):
return [t.unsqueeze(0) for t in inp]
return inp.unsqueeze(0)
def flatten(xs):
result = []
if isinstance(xs, (list, tuple)):
for x in xs:
result.extend(flatten(x))
else:
result.append(xs)
return result
single_batch_input_args = flatten([unsqueeze_inp(input) for input in args[:-1]])
output = criterion(*single_batch_input_args)
reduction = get_reduction(criterion)
if reduction == 'none':
return output.squeeze(0)
# reduction is 'sum' or 'mean' which results in a scalar
return output
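# Minimal sketch (illustrative only) of how `single_batch_reference_criterion_fn`
# is used: an unbatched sample is unsqueezed to a batch of one, run through the
# criterion, and squeezed back for comparison with the direct no-batch-dim call.
# `_demo_single_batch_reference` is a hypothetical name for this example.
def _demo_single_batch_reference():
    criterion = nn.L1Loss(reduction='none')
    inp, target = torch.randn(3), torch.randn(3)
    ref = single_batch_reference_criterion_fn(inp, target, criterion)
    out = criterion(inp, target)
    assert torch.allclose(out, ref)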
# Check that regression criteria work with no batch dimensions
regression_criterion_no_batch = [
'L1Loss', 'MSELoss', 'PoissonNLLLoss', 'HuberLoss', 'SmoothL1Loss'
]
reductions = ['none', 'mean', 'sum']
for name, reduction in product(regression_criterion_no_batch, reductions):
regression_test_info = dict(
fullname="{}_no_batch_dim_{}".format(name, reduction),
constructor=lambda *args, name=name: getattr(nn, name)(reduction=reduction),
input_size=(3, ),
target_size=(3, ),
reference_fn=single_batch_reference_criterion_fn,
test_cpp_api_parity=False,
)
criterion_tests.append(regression_test_info)
for reduction in reductions:
regression_test_info = dict(
fullname=f"KLDivLoss_no_batch_dim_{reduction}",
constructor=lambda: nn.KLDivLoss(reduction=reduction),
input_fn=lambda: torch.rand((3,)).log(),
target_fn=lambda: torch.rand((3,)),
reference_fn=single_batch_reference_criterion_fn,
test_cpp_api_parity=False,
)
criterion_tests.append(regression_test_info)
# Check that classification criteria work with no batch dimensions
# List of tuples of (name, input_fn, target_fn)
classification_criterion_no_batch = [
('BCELoss', lambda: torch.sigmoid(torch.randn(9)), lambda: torch.randn(9)),
('BCEWithLogitsLoss', lambda: torch.randn(9), lambda: torch.randn(9)),
('HingeEmbeddingLoss', lambda: torch.randn(9), lambda: torch.tensor([-1, 1, 1] * 3)),
('MultiLabelMarginLoss', lambda: torch.randn(4), lambda: torch.tensor([3, 0, -1, 1])),
('SoftMarginLoss', lambda: torch.randn(9), lambda: torch.tensor([-1, 1, 1] * 3)),
('NLLLoss', lambda: F.log_softmax(torch.randn(3), dim=0), lambda: torch.tensor(1)),
('CosineEmbeddingLoss', lambda: (torch.randn(9), torch.randn(9)), lambda: torch.tensor(1)),
# For MarginRankingLoss, input_fn : (x1, x2) and target_fn : target
('MarginRankingLoss', lambda: (torch.randn(()), torch.randn(())), lambda: torch.randn(()).sign()),
# For TripletMarginLoss, input_fn : (anchor, positive) and target_fn : negative
('TripletMarginLoss', lambda: (torch.randn(9), torch.randn(9)), lambda: torch.randn(9)),
('MultiLabelSoftMarginLoss', lambda: torch.randn(9), lambda: torch.randn(9)),
]
classification_criterion_no_batch_extra_info: Dict[str, dict] = {
'MultiLabelMarginLoss': {'check_gradgrad': False},
}
# TODO : Fix these discrepancies
classification_cpp_parity = {
'BCELoss': False,
'BCEWithLogitsLoss': False,
'HingeEmbeddingLoss': False,
'NLLLoss': False,
'SoftMarginLoss': False,
}
reductions = ['none', 'mean', 'sum']
for (name, input_fn, target_fn), reduction in product(classification_criterion_no_batch,
reductions):
classification_test_info = dict(
fullname="{}_no_batch_dim_{}".format(name, reduction),
constructor=lambda *args, name=name: getattr(nn, name)(reduction=reduction),
input_fn=lambda f=input_fn: f(),
target_fn=lambda f=target_fn: f(),
reference_fn=single_batch_reference_criterion_fn,
test_cpp_api_parity=True,
has_parity=classification_cpp_parity.get(name, True)
)
extra_info = classification_criterion_no_batch_extra_info.get(name, {})
classification_test_info.update(extra_info)
criterion_tests.append(classification_test_info)
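# Note on the lambdas in the loops above: `name=name`, `f=input_fn`, and
# `f=target_fn` are bound as default arguments on purpose. Without them, every
# closure created in the loop would capture the loop variable by reference and
# see only its final value (late binding). A minimal sketch:
def _demo_late_binding():
    late = [lambda: i for i in range(3)]
    bound = [lambda i=i: i for i in range(3)]
    assert [f() for f in late] == [2, 2, 2]
    assert [f() for f in bound] == [0, 1, 2]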
class NNTestCase(TestCase):
# _forward is defined in classes inheriting from NNTestCase
@abstractmethod
def _forward(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def _get_parameters(self, module: nn.Module) -> Tuple[List[nn.Parameter], List[nn.Parameter]]:
raise NotImplementedError
@abstractmethod
def _zero_grad_parameters(self, module: nn.Module) -> None:
raise NotImplementedError
@abstractmethod
def _backward(self, module: nn.Module,
input: _TensorOrTensors, output: torch.Tensor,
grad_output: Union[torch.Tensor, Sequence[torch.Tensor]],
create_graph: bool = False):
raise NotImplementedError
def _jacobian(self, input, num_out):
if isinstance(input, tuple):
return tuple(self._jacobian(elem, num_out) for elem in input)
elif isinstance(input, list):
return [self._jacobian(elem, num_out) for elem in input]
else:
return torch.zeros(input.nelement(), num_out)
def _flatten_tensors(self, x):
if isinstance(x, torch.Tensor):
if x.is_sparse:
return x.to_dense().view(-1)
else:
return x.view(-1)
else:
return tuple(self._flatten_tensors(a) for a in x)
def _zero_grad_input(self, input):
if isinstance(input, torch.Tensor):
if input.requires_grad and input.grad is not None:
input.grad.zero_()
input.grad.detach_()
else:
for i in input:
self._zero_grad_input(i)
def _analytical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True):
output = self._forward(module, input)
output_size = output.nelement()
if jacobian_input:
jacobian_inp = self._jacobian(input, output_size)
flat_jacobian_input = list(_iter_tensors(jacobian_inp))
if jacobian_parameters:
num_param = sum(p.numel() for p in self._get_parameters(module)[0])
jacobian_param = torch.zeros(num_param, output_size)
for i in range(output_size):
param, d_param = self._get_parameters(module)
            # use zeros for parameters whose grad is None
d_param = [torch.zeros_like(p) if d is None else d for (p, d) in zip(param, d_param)]
d_out = torch.zeros_like(output)
flat_d_out = d_out.view(-1)
flat_d_out[i] = 1
if jacobian_parameters:
self._zero_grad_parameters(module)
# Tensors will accumulate gradient from multiple steps
if jacobian_input:
self._zero_grad_input(input)
d_input = self._backward(module, input, output, d_out)
if jacobian_input:
for jacobian_x, d_x in zip(flat_jacobian_input, _iter_tensors(d_input)):
jacobian_x[:, i] = d_x.contiguous().view(-1)
if jacobian_parameters:
jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0)
res: Tuple[torch.Tensor, ...] = tuple()
if jacobian_input:
res += jacobian_inp,
if jacobian_parameters:
res += jacobian_param,
return res
def _numerical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True):
def fw(*input):
return self._forward(module, input).detach()
res: Tuple[torch.Tensor, ...] = tuple()
if jacobian_input:
res += _get_numerical_jacobian(fw, input, eps=1e-6),
if jacobian_parameters:
param, _ = self._get_parameters(module)
to_cat = []
for p in param:
jacobian = _get_numerical_jacobian(fw, input, target=p, eps=1e-6)
# get_numerical_jacobian returns a list of tuples but we require a tensor
to_cat.append(jacobian[0][0])
res += (torch.cat(to_cat, 0),)
return res
def check_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True):
jacobian_parameters = bool(self._get_parameters(module)[0])
analytical = self._analytical_jacobian(module, input, jacobian_input, jacobian_parameters)
numerical = self._numerical_jacobian(module, input, jacobian_input, jacobian_parameters)
analytical_t = list(_iter_tensors(analytical))
numerical_t = list(_iter_tensors(numerical))
differences = []
for a, n in zip(analytical_t, numerical_t):
if a.numel() != 0:
differences.append(a.add(n, alpha=-1).abs().max())
# TODO: compare structure (ensure analytic jacobian has correct shape)
if len(differences) > 0:
self.assertLessEqual(max(differences), PRECISION) # type: ignore[type-var]
class TestBase(object):
_required_arg_names = {'constructor_args', 'input', 'extra_args'}
def __init__(self, constructor, desc='', reference_fn=None, fullname=None, **kwargs):
self.desc = desc
self.fullname = fullname
self.constructor = constructor
self.reference_fn = reference_fn
for name in self._required_arg_names:
if name not in kwargs and name + '_fn' not in kwargs and name + '_size' not in kwargs:
if name in {'constructor_args', 'extra_args'}:
kwargs[name] = tuple()
else:
raise ValueError("{}: Specify {} by a value, a function to generate it, or it's size!"
.format(self.get_name(), name))
self._extra_kwargs = kwargs
self._arg_cache = {}
def get_name(self):
if self.fullname is not None:
return 'test_' + self.fullname
test_name = 'test_' + self.constructor.__name__
if self.desc:
test_name += '_' + self.desc
return test_name
def _unpack(self, value):
if isinstance(value, torch.Tensor):
return value
elif is_iterable(value):
return type(value)(self._unpack(v) for v in value)
else:
return value
@property
def constructor_args(self):
return self._get_arg('constructor_args', True)
@property
def extra_args(self):
return self._get_arg('extra_args', True)
def _get_arg(self, name, unpack):
assert name in self._required_arg_names
if name not in self._arg_cache:
fn_name = name + '_fn'
size_name = name + '_size'
if name in self._extra_kwargs:
self._arg_cache[name] = self._extra_kwargs[name]
elif fn_name in self._extra_kwargs:
self._arg_cache[name] = self._extra_kwargs[fn_name]()
else:
assert size_name in self._extra_kwargs, \
"Missing `{}`, `{}` or `{}` for {}".format(name, size_name, fn_name, self.get_name())
def map_tensor_sizes(sizes):
if isinstance(sizes, list):
return [map_tensor_sizes(s) for s in sizes]
elif isinstance(sizes, torch.Tensor):
return sizes.double()
else:
return torch.randn(sizes)
self._arg_cache[name] = map_tensor_sizes(self._extra_kwargs[size_name])
return self._unpack(self._arg_cache[name]) if unpack else self._arg_cache[name]
def _get_input(self, unpack=True):
return self._get_arg('input', unpack)
def __call__(self, test_case):
raise NotImplementedError
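# Illustrative sketch (hypothetical, not a registered test): TestBase resolves
# each required argument from `<name>`, then `<name>_fn`, then `<name>_size`,
# in that order; `_size` entries are materialized with `torch.randn`.
def _demo_testbase_arg_resolution():
    test = TestBase(constructor=nn.ReLU, input_size=(2, 3))
    inp = test._get_input()
    assert inp.shape == (2, 3)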
class ModuleTest(TestBase):
@abstractmethod
def _do_test(self, test_case: Any, module: nn.Module, input: Any) -> Any:
raise NotImplementedError
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.jacobian_input = kwargs.get('jacobian_input', True)
self.should_test_cuda = kwargs.get('test_cuda', True)
self.should_test_pickle = kwargs.get('pickle', True)
self.check_gradgrad = kwargs.get('check_gradgrad', True)
self.FIXME_no_cuda_gradgrad_comparison = \
kwargs.get('FIXME_no_cuda_gradgrad_comparison', False)
self.precision = kwargs.get('precision', 2e-4)
self.check_forward_only = kwargs.get('check_forward_only', False)
def __call__(self, test_case):
module = self.constructor(*self.constructor_args)
input = self._get_input()
if self.reference_fn is not None:
out = test_case._forward(module, input)
ref_input = deepcopy(input)
ref_module = deepcopy(module)
expected_out = self.reference_fn(ref_input, test_case._get_parameters(module)[0], ref_module)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
test_case.assertEqualIgnoreType(out, expected_out)
if self.check_forward_only:
return
self.test_noncontig(test_case, module, input)
if self.should_test_pickle:
# TODO: do this with in-memory files as soon as torch.save will support it
with tempfile.TemporaryFile() as f:
test_case._forward(module, input)
torch.save(module, f)
f.seek(0)
module_copy = torch.load(f)
test_case.assertEqual(test_case._forward(module, input), test_case._forward(module_copy, input))
self._do_test(test_case, module, input)
def noncontiguize(self, obj):
if isinstance(obj, list):
return [self.noncontiguize(o) for o in obj]
elif isinstance(obj, tuple):
return tuple(self.noncontiguize(o) for o in obj)
tensor = obj
ndim = tensor.dim()
        # Making only the last dimension noncontiguous would make it easy to
        # hide bugs, because .view(-1) still works. So try to find a dim with
        # size > 1 and make that one non-contiguous, i.e., stack + select on
        # the dimension directly after it.
dim = ndim
for d in range(ndim):
if tensor.size(d) > 1:
dim = d + 1
break
noncontig = torch.stack([torch.empty_like(tensor), tensor], dim).select(dim, 1).detach()
assert noncontig.numel() == 1 or noncontig.numel() == 0 or not noncontig.is_contiguous()
noncontig.requires_grad = tensor.requires_grad
return noncontig
def test_noncontig(self, test_case, module, input):
# check no scalars, can't make non-contig
if isinstance(input, torch.Tensor) and input.dim() == 0:
return
if any(i.dim() == 0 for i in input if isinstance(i, torch.Tensor)):
return
test_case._zero_grad_parameters(module)
test_case._zero_grad_input(input)
with freeze_rng_state():
output = test_case._forward(module, input)
if getattr(module, "return_indices", False):
output = output[0]
grad_output = output.new(output.shape).normal_()
output = output.clone()
d_input = deepcopy(test_case._backward(module, input, output, grad_output))
d_param = deepcopy(test_case._get_parameters(module)[1])
nc_input = self.noncontiguize(input)
nc_grad_output = self.noncontiguize(grad_output)
for contig_i, contig_g in product((True, False), repeat=2):
i = input if contig_i else nc_input
# Some ops, e.g., nn.Flatten, return gradient that shares
# storage with the grad_output. Hence we copy here.
go = deepcopy(grad_output if contig_g else nc_grad_output)
test_case._zero_grad_parameters(module)
test_case._zero_grad_input(i)
with freeze_rng_state():
out = test_case._forward(module, i)
if getattr(module, "return_indices", False):
out = out[0]
grad = test_case._backward(module, i, out, go)
test_case.assertEqual(out, output)
test_case.assertEqual(grad, d_input, atol=1e-4, rtol=0)
test_case.assertEqual(test_case._get_parameters(module)[1], d_param)
def test_cuda(self, test_case):
if not TEST_CUDA or not self.should_test_cuda:
raise unittest.SkipTest('Excluded from CUDA tests')
cpu_input = self._get_input()
type_map = {torch.double: torch.float}
cpu_input_tuple = cpu_input if isinstance(cpu_input, tuple) else (cpu_input,)
gpu_input_tuple = to_gpu(cpu_input_tuple, type_map=type_map)
cpu_module = self.constructor(*self.constructor_args)
gpu_module = self.constructor(*self.constructor_args).float().cuda()
cpu_param = test_case._get_parameters(cpu_module)
gpu_param = test_case._get_parameters(gpu_module)
for cpu_p, gpu_p in zip(cpu_param[0], gpu_param[0]):
gpu_p.data.copy_(cpu_p)
test_case._zero_grad_input(cpu_input_tuple)
test_case._zero_grad_input(gpu_input_tuple)
test_case._zero_grad_parameters(cpu_module)
test_case._zero_grad_parameters(gpu_module)
cpu_output = test_case._forward(cpu_module, cpu_input_tuple)
gpu_output = test_case._forward(gpu_module, gpu_input_tuple)
if getattr(cpu_module, "return_indices", False):
cpu_output = cpu_output[0]
gpu_output = gpu_output[0]
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
test_case.assertEqualIgnoreType(cpu_output, gpu_output, atol=self.precision, rtol=0)
# Run backwards on CPU and GPU and compare results
for _ in range(5):
cpu_gradOutput = cpu_output.clone().normal_()
gpu_gradOutput = cpu_gradOutput.type_as(gpu_output)
cpu_gradInput = test_case._backward(cpu_module, cpu_input_tuple, cpu_output, cpu_gradOutput)
gpu_gradInput = test_case._backward(gpu_module, gpu_input_tuple, gpu_output, gpu_gradOutput)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
test_case.assertEqualIgnoreType(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0)
for cpu_d_p, gpu_d_p in zip(cpu_param[1], gpu_param[1]):
test_case.assertEqual(cpu_d_p, gpu_d_p, atol=self.precision, rtol=0)
# Run double-backwards on CPU and GPU and compare results
if self.check_gradgrad and not self.FIXME_no_cuda_gradgrad_comparison:
cpu_output = cpu_module(*cpu_input_tuple)
gpu_output = gpu_module(*gpu_input_tuple)
if getattr(cpu_module, "return_indices", False):
cpu_output = cpu_output[0]
gpu_output = gpu_output[0]
cpu_gradOutput = torch.randn_like(cpu_output, requires_grad=True)
gpu_gradOutput = cpu_gradOutput.type_as(gpu_output).detach()
gpu_gradOutput.requires_grad = True
cpu_gradInputs = torch.autograd.grad(
cpu_output,
cpu_input_tuple + tuple(cpu_module.parameters()),
cpu_gradOutput,
create_graph=True)
gpu_gradInputs = torch.autograd.grad(
gpu_output,
gpu_input_tuple + tuple(gpu_module.parameters()),
gpu_gradOutput,
create_graph=True)
for cpu_d_i, gpu_d_i in zip(cpu_gradInputs, gpu_gradInputs):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
test_case.assertEqualIgnoreType(cpu_d_i, gpu_d_i, atol=self.precision, rtol=0)
# We mix output into the second backwards computation so that
# torch.autograd.grad doesn't complain that some inputs
# are unreachable (which can happen if you differentiate
            # only on the gradient).
cpu_gg = torch.autograd.grad(
cpu_output.sum() + sum(x.sum() for x in cpu_gradInputs),
cpu_input_tuple + (cpu_gradOutput,) + tuple(cpu_module.parameters()),
retain_graph=True)
gpu_gg = torch.autograd.grad(
gpu_output.sum() + sum(x.sum() for x in gpu_gradInputs),
gpu_input_tuple + (gpu_gradOutput,) + tuple(gpu_module.parameters()),
retain_graph=True)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
test_case.assertEqualIgnoreType(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0)
for cpu_d_p, gpu_d_p in zip(cpu_gg, gpu_gg):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
test_case.assertEqualIgnoreType(cpu_d_p, gpu_d_p, atol=self.precision, rtol=0)
self.test_noncontig(test_case, gpu_module, gpu_input_tuple)
class InputVariableMixin(object):
def _get_input(self):
input = TestBase._get_input(self, False) # type: ignore[arg-type]
def map_variables(i):
if isinstance(i, torch.Tensor):
if i.is_floating_point() or i.is_complex():
i.requires_grad = True
return i
else:
return type(i)(map_variables(elem) for elem in i)
return map_variables(input)
class NewModuleTest(InputVariableMixin, ModuleTest): # type: ignore[misc]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cudnn = kwargs.get('cudnn', False)
self.check_inplace = kwargs.get('check_inplace', False)
self.check_gradgrad = kwargs.get('check_gradgrad', True)
self.skip_double = kwargs.get('skip_double', False)
self.skip_half = kwargs.get('skip_half', False)
self.with_tf32 = kwargs.get('with_tf32', False)
self.tf32_precision = kwargs.get('tf32_precision', 0.001)
self.test_cpu = kwargs.get('test_cpu', True)
self.has_sparse_gradients = kwargs.get('has_sparse_gradients', False)
self.check_batched_grad = kwargs.get('check_batched_grad', True)
self.gradcheck_fast_mode = kwargs.get('gradcheck_fast_mode', None)
self.supports_forward_ad = kwargs.get('supports_forward_ad', False)
self.supports_fwgrad_bwgrad = kwargs.get('supports_fwgrad_bwgrad', False)
def _check_gradients(self, test_case, module, input_tuple):
params = tuple(x for x in module.parameters())
num_inputs = len(input_tuple)
def fn_to_gradcheck(*inputs_and_params, **kwargs):
assert not kwargs
return test_case._forward(module, inputs_and_params[:num_inputs])
        # gradcheck doesn't support operators that take in dense inputs but
        # produce sparse gradients for their parameters. This only happens for
        # nn.Embedding and nn.EmbeddingBag. Instead, we call `self.check_jacobian`,
        # which is a slightly different version of gradcheck that can handle this.
if self.has_sparse_gradients:
assert num_inputs == 1
test_input_jacobian = torch.is_floating_point(input_tuple[0])
test_case.check_jacobian(module, input_tuple[0], test_input_jacobian)
else:
test_case.assertTrue(gradcheck(fn_to_gradcheck, input_tuple + params,
check_batched_grad=self.check_batched_grad,
fast_mode=self.gradcheck_fast_mode,
check_forward_ad=self.supports_forward_ad))
if self.check_gradgrad:
test_case.assertTrue(gradgradcheck(fn_to_gradcheck, input_tuple + params,
check_batched_grad=self.check_batched_grad,
fast_mode=self.gradcheck_fast_mode,
check_fwd_over_rev=self.supports_fwgrad_bwgrad))
def _do_test(self, test_case, module, input):
num_threads = torch.get_num_threads()
torch.set_num_threads(1)
input_tuple = input if isinstance(input, tuple) else (input,)
self._check_gradients(test_case, module, input_tuple)
# check if module can be printed
module.__repr__()
if self.check_inplace:
# check if the inplace variant of the module gives the same result
# as the out-of-place
# check_inplace doesn't support multiple input tensors, since we don't have any modules
# that modify the inputs in-place and that accept more than one input
assert len(input_tuple) == 1
input = input_tuple[0]
module_ip = self.constructor(*self.constructor_args, inplace=True)
input_version = input._version
with freeze_rng_state():
output = module(input)
test_case.assertEqual(input._version, input_version)
input_ip = deepcopy(input)
input_ip_clone = input_ip.clone()
with freeze_rng_state():
output_ip = module_ip(input_ip_clone)
test_case.assertNotEqual(input_ip_clone._version, input_version)
test_case.assertEqual(output, output_ip)
grad = output.data.clone().normal_()
if input.grad is not None:
with torch.no_grad():
input.grad.zero_()
if input_ip.grad is not None:
with torch.no_grad():
input_ip.grad.zero_()
output.backward(grad)
output_ip.backward(grad)
test_case.assertEqual(input.grad, input_ip.grad)
def assert_module_parameters_are(tensor_type, device_id=None):
for p in module.parameters():
test_case.assertIsInstance(p, tensor_type)
if device_id is not None:
test_case.assertEqual(p.get_device(), device_id)
if all(isinstance(t, torch.LongTensor) for t in input_tuple) and TEST_CUDA:
# check that cuda() moves module parameters to correct GPU device,
# and that float() casts parameters correctly
input_tuple = tuple(t.cuda() for t in input_tuple)
module.float().cuda()
module(*input_tuple)
assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined]
if torch.cuda.device_count() > 1:
input_tuple = tuple(t.cuda(1) for t in input_tuple)
module.cuda(1)
with torch.cuda.device(1):
module(*input_tuple)
assert_module_parameters_are(torch.cuda.FloatTensor, 1) # type: ignore[attr-defined]
else:
# check that float()/double() casters work correctly
def to_type(tensor, real, complex):
if tensor.is_complex():
return tensor.to(complex)
elif tensor.is_floating_point():
return tensor.to(real)
else:
return tensor
def to_half(x):
# TODO: torch.complex32 when properly supported
return to_type(x, torch.float16, None)
def to_single(x):
return to_type(x, torch.float32, torch.complex64)
def to_double(x):
return to_type(x, torch.float64, torch.complex128)
# to float
input_tuple = tuple(to_single(t) for t in input_tuple)
module.float()
module(*input_tuple)
assert_module_parameters_are(torch.FloatTensor)
# and back to double
input_tuple = tuple(to_double(t) for t in input_tuple)
module.double()
module(*input_tuple)
assert_module_parameters_are(torch.DoubleTensor)
if TEST_CUDA and self.should_test_cuda:
# check that cuda() moves module parameters to correct GPU device,
# and that float() casts parameters correctly
# to GPU0
input_tuple = tuple(to_single(t).cuda() for t in input_tuple)
module.float().cuda()
module(*input_tuple)
assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined]
# to CPU
input_tuple = tuple(t.cpu() for t in input_tuple)
module.cpu()
module(*input_tuple)
assert_module_parameters_are(torch.FloatTensor)
# back to GPU0
input_tuple = tuple(t.cuda() for t in input_tuple)
module.cuda()
module(*input_tuple)
assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined]
                # test that the forward pass of the module runs correctly without cuDNN
if self.cudnn:
with torch.backends.cudnn.flags(enabled=False):
module(*input_tuple)
assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined]
if torch.cuda.device_count() >= 2:
# test cross-GPU transfer works
# to GPU1
input_tuple = tuple(t.cuda(1) for t in input_tuple)
module.cuda(1)
with torch.cuda.device(1):
module(*input_tuple)
assert_module_parameters_are(torch.cuda.FloatTensor, 1) # type: ignore[attr-defined]
if not self.skip_double:
# test double()
input_tuple = tuple(to_double(t).cuda() for t in input_tuple)
module.double().cuda()
module(*input_tuple)
assert_module_parameters_are(torch.cuda.DoubleTensor, 0) # type: ignore[attr-defined]
# test half()
if not self.skip_half:
input_tuple = tuple(to_half(t).cuda() for t in input_tuple)
module.half().cuda()
module(*input_tuple)
assert_module_parameters_are(torch.cuda.HalfTensor, 0) # type: ignore[attr-defined]
torch.set_num_threads(num_threads)
def _get_target(self):
return self._get_arg('target', False)
@property
def constructor_args(self):
return self._get_arg('constructor_args', False)
class CriterionTest(InputVariableMixin, TestBase): # type: ignore[misc]
# TODO: check that criterions don't ignore grad_output
_required_arg_names = TestBase._required_arg_names.union({'target'})
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.should_test_cuda = kwargs.get('test_cuda', True)
self.check_forward_only = kwargs.get('check_forward_only', False)
self.check_gradgrad = kwargs.get('check_gradgrad', True)
self.check_half = kwargs.get('check_half', True)
self.check_bfloat16 = kwargs.get('check_bfloat16', False)
self.check_complex = kwargs.get('check_complex', False)
self.test_cpu = kwargs.get('test_cpu', True)
self.with_tf32 = kwargs.get('with_tf32', True)
self.tf32_precision = kwargs.get('tf32_precision', 0.001)
self.check_batched_grad = kwargs.get('check_batched_grad', True)
def __call__(self, test_case):
module = self.constructor(*self.constructor_args)
input = self._get_input()
# Check that these methods don't raise errors
module.__repr__()
str(module)
target = self._get_target()
if self.reference_fn is not None:
out = test_case._forward_criterion(module, input, target, extra_args=self.extra_args)
ref_args = (deepcopy(input), deepcopy(target)) + self.extra_args + (module,)
expected_out = self.reference_fn(*ref_args)
test_case.assertEqual(out, expected_out)
if self.check_forward_only:
return
params = tuple(x for x in module.parameters())
if not isinstance(input, tuple):
inputs = (input,) + params + (target,)
def apply_fn(input, target, *params):
return module(input, target)
else:
inputs = input + params + (target,)
def apply_fn(input1, input2, target, *params): # type: ignore[misc]
return module(input1, input2, target)
gradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad)
if self.check_gradgrad:
gradgradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad)
def test_cuda(self, test_case, dtype, extra_args=None):
def convert_dtype(obj, dtype, requires_grad=False):
if isinstance(obj, torch.Tensor):
return obj.detach().to(dtype=dtype).requires_grad_(requires_grad)
elif isinstance(obj, tuple):
return tuple(convert_dtype(o, dtype, requires_grad) for o in obj)
else:
return obj
if not TEST_CUDA or not self.should_test_cuda:
raise unittest.SkipTest('Excluded from CUDA tests')
cpu_input = self._get_input()
cpu_target = self._get_target()
cpu_module = self.constructor(*self.constructor_args)
gpu_module = self.constructor(*self.constructor_args)
# Convert input, target and module parameters to dtype
cpu_input = convert_dtype(cpu_input, dtype, True)
if cpu_target.is_floating_point() or cpu_target.is_complex():
cpu_target = convert_dtype(cpu_target, dtype)
cpu_module.type(dtype)
gpu_module.type(dtype)
# GPU setup
gpu_input = to_gpu(cpu_input)
gpu_target = to_gpu(cpu_target)
gpu_module.cuda()
        # torch.HalfTensor doesn't support most operations, so convert back to the default dtype
if dtype in {torch.half, torch.bfloat16}:
cpu_input = self._get_input()
cpu_target = self._get_target()
# Loss modules with weights require consistent input/module weight types
cpu_module = self.constructor(*self.constructor_args)
cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target, extra_args=extra_args)
gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target, extra_args=extra_args)
        # `dtype` could previously be None, so set the precision this way instead of using a precision map
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
test_case.assertEqualIgnoreType(cpu_output, gpu_output,
atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0)
cpu_gradInput = test_case._backward_criterion(
cpu_module, cpu_input, cpu_output, cpu_target, extra_args=extra_args)
gpu_gradInput = test_case._backward_criterion(
gpu_module, gpu_input, gpu_output, gpu_target, extra_args=extra_args)
        # `dtype` could previously be None, so set the precision this way instead of using a precision map
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
test_case.assertEqualIgnoreType(cpu_gradInput, gpu_gradInput,
atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0)
def _get_target(self):
return self._get_arg('target', False)
@property
def constructor_args(self):
return self._get_arg('constructor_args', False)
@property
def extra_args(self):
return self._get_arg('extra_args', False)
| pytorch-master | torch/testing/_internal/common_nn.py |
import torch
from torch.testing._internal.common_utils import TEST_WITH_ROCM
class AutocastTestLists(object):
def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh
# returns args as a tuple
return input + hx + weights
# Supplies ops and arguments for test_autocast_* in test/test_cuda.py
def __init__(self, dev):
super().__init__()
n = 8
# Utility arguments, created as one-element tuples
pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
torch.randn(dimset, dtype=torch.float32, device=dev))
for dimset in dimsets]
bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
# The lists below organize ops that autocast needs to test.
# self.list_name corresponds to test_autocast_list_name in test/test_cuda.py.
# Each op is associated with a tuple of valid arguments.
        # In addition, cuDNN conv ops are not supported on ROCm, so they are
        # skipped by passing the TEST_WITH_ROCM flag with those ops in the self.torch_fp16 list.
# Some ops implement built-in type promotion. These don't need autocasting,
# but autocasting relies on their promotion, so we include tests to double-check.
self.torch_expect_builtin_promote = [
("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("le", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("add", pointwise0_fp32 + pointwise1_fp16, torch.float32),
("div", pointwise0_fp32 + pointwise1_fp16, torch.float32),
("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32),
("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32),
("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
]
self.methods_expect_builtin_promote = [
("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
]
# The remaining lists organize ops that autocast treats explicitly.
self.torch_fp16 = [
# deprecated _convolution
("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
(0, 0), 1, False, True, True)),
# the current _convolution
("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
(0, 0), 1, False, True, True, True)),
("conv1d", conv_args_fp32[0]),
("conv2d", conv_args_fp32[1]),
("conv3d", conv_args_fp32[2]),
("conv_tbc", conv_args_fp32[0] + bias_fp32),
("conv_transpose1d", conv_args_fp32[0]),
("conv_transpose2d", conv_args_fp32[1]),
("conv_transpose3d", conv_args_fp32[2]),
("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)),
("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM),
("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1),
(1, 1), 1, False, True, True), TEST_WITH_ROCM),
("prelu", pointwise0_fp32 + element0_fp32),
("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32),
("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32),
("matmul", mat0_fp32 + mat1_fp32),
("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32),
("mm", mat0_fp32 + mat1_fp32),
("mv", mat0_fp32 + pointwise0_fp32),
("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32),
("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
torch.randn((n, n, n), device=dev, dtype=torch.float32))),
("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
torch.randn((n, n, n), device=dev, dtype=torch.float32),
torch.randn((n, n, n), device=dev, dtype=torch.float32))),
("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
torch.randn((n, n, n), device=dev, dtype=torch.float32))),
# _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell.
# ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
# ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)),
("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)),
("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
]
self.torch_fp32 = [
("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
("cosh", pointwise0_fp16),
("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)),
("exp", pointwise0_fp16),
("expm1", pointwise0_fp16),
("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)),
("reciprocal", pointwise0_fp16),
("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)),
("sinh", pointwise0_fp16),
("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)),
("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16),
("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)),
# ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API.
("softmax", pointwise0_fp16 + (0,)),
("log_softmax", pointwise0_fp16 + (0,)),
("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)),
("group_norm", mat0_fp16 + (1,)),
("norm", pointwise0_fp16),
("norm", pointwise0_fp16, {"dim": 0}),
# these need magma
# ("norm", mat0_fp16, {"p": "nuc"}),
# ("norm", mat0_fp16, {"p": "nuc", "dim": 0}),
("norm", pointwise0_fp16, {"p": 1}),
("norm", pointwise0_fp16, {"p": 1, "dim": 0}),
("cosine_similarity", mat0_fp16 + mat1_fp16),
("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16),
torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16),
torch.tensor([1], device=dev, dtype=torch.int))),
("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)),
("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)),
("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16),
("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
("cumprod", pointwise0_fp16 + (0,)),
("cumsum", pointwise0_fp16 + (0,)),
("dist", pointwise0_fp16 + pointwise1_fp16),
("pdist", mat0_fp16),
("cdist", mat0_fp16 + mat1_fp16),
("prod", pointwise0_fp16),
("prod", pointwise0_fp16 + (0,)),
("renorm", mat0_fp16 + (2, 0, 1.0)),
("sum", pointwise0_fp16),
("sum", mat0_fp16 + (1,)),
("logsumexp", mat0_fp16 + (1,)),
]
self.torch_need_autocast_promote = [
("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)),
("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16),
("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)),
("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev),
torch.randn((1, 2), dtype=torch.float32, device=dev),
torch.randn((1, 2, 2), dtype=torch.float16, device=dev),
torch.randn((1,), dtype=torch.float32, device=dev))),
("cross", (torch.randn(3, dtype=torch.float32, device=dev),
torch.randn(3, dtype=torch.float16, device=dev))),
("dot", pointwise0_fp16 + pointwise1_fp32),
("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev),
torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev),
0, 0, False)),
("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),),
torch.randn(1, device=dev, dtype=torch.float16))),
("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),),
torch.randn(1, device=dev, dtype=torch.float32))),
("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev),
torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev),
0,
torch.randint(0, 2, (2, 2, 2), device=dev),
torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev),
0,
torch.randint(0, 2, (2, 2, 2), device=dev),
torch.randn((2, 2, 2), dtype=torch.float32, device=dev))),
]
self.nn_fp16 = [
("linear", mat0_fp32 + mat1_fp32 + mat2_fp32),
]
self.nn_fp32 = [
("softplus", pointwise0_fp16),
("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float),
torch.zeros((n,), device=dev, dtype=torch.long))),
("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half),
torch.zeros((n, n, n), device=dev, dtype=torch.long))),
("l1_loss", mat0_fp16 + mat1_fp16),
("smooth_l1_loss", mat0_fp16 + mat1_fp16),
("mse_loss", mat0_fp16 + mat1_fp16),
("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
]
self.linalg_fp16 = [
("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)),
]
self.methods_fp16 = [
("__matmul__", mat0_fp32 + mat1_fp32)
]
self.methods_fp32 = [
("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)),
]
self.banned = [
("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32),
torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn),
]
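# Minimal sketch (assumes a CUDA device is available; the real tests live in
# test/test_cuda.py): how an entry from `torch_fp16` is typically exercised
# under autocast. `_demo_autocast_mm_fp16` is a hypothetical helper name.
def _demo_autocast_mm_fp16(dev='cuda'):
    lists = AutocastTestLists(dev)
    op, args = next((e[0], e[1]) for e in lists.torch_fp16 if e[0] == 'mm')
    with torch.autocast(device_type='cuda'):
        out = getattr(torch, op)(*args)
    # `mm` is on the fp16 cast list, so autocast should produce a half output.
    assert out.dtype == torch.float16
    return out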
class AutocastCPUTestLists(object):
# Supplies ops and arguments for test_autocast_* in test/test_cpu.py
def __init__(self, dev):
super().__init__()
n = 8
# Utility arguments, created as one-element tuples
pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n))
dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),)
for dimset in dummy_dimsets]
dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
conv_args_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),
torch.randn(dimset, dtype=torch.bfloat16, device=dev))
for dimset in dimsets]
conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
torch.randn(dimset, dtype=torch.float32, device=dev))
for dimset in dimsets]
bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
dummy_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),)
for dimset in dummy_dimsets]
# The lists below organize ops that autocast needs to test.
# self.list_name corresponds to test_autocast_list_name in test/test_cpu.py.
# Each op is associated with a tuple of valid arguments.
# Some ops implement built-in type promotion. These don't need autocasting,
# but autocasting relies on their promotion, so we include tests to double-check.
self.torch_expect_builtin_promote = [
("eq", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("ge", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("gt", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("le", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("lt", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("ne", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("add", pointwise0_fp32 + pointwise1_bf16, torch.float32),
("div", pointwise0_fp32 + pointwise1_bf16, torch.float32),
("mul", pointwise0_fp32 + pointwise1_bf16, torch.float32),
]
self.methods_expect_builtin_promote = [
("__eq__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("__ge__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("__gt__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("__le__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("__lt__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("__ne__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
("__add__", pointwise0_fp32 + pointwise1_bf16, torch.float32),
("__div__", pointwise0_fp32 + pointwise1_bf16, torch.float32),
("__mul__", pointwise0_fp32 + pointwise1_bf16, torch.float32),
]
# The remaining lists organize ops that autocast treats explicitly.
self.torch_bf16 = [
("conv1d", conv_args_fp32[0]),
("conv2d", conv_args_fp32[1]),
("conv3d", conv_args_fp32[2]),
("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
torch.randn((n, n, n), device=dev, dtype=torch.float32))),
("mm", mat0_fp32 + mat1_fp32),
("matmul", mat0_fp32 + mat1_fp32),
("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
torch.randn((n, n, n), device=dev, dtype=torch.float32),
torch.randn((n, n, n), device=dev, dtype=torch.float32))),
("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
torch.randn((n, n, n), device=dev, dtype=torch.float32))),
("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32),
torch.randn((5, 3, 5), device=dev, dtype=torch.float32),
torch.randn(5, device=dev, dtype=torch.float32),
0)),
]
self.torch_fp32 = [
("conv_transpose1d", conv_args_bf16[0]),
("conv_transpose2d", conv_args_bf16[1]),
("conv_transpose3d", conv_args_bf16[2]),
]
self.nn_bf16 = [
("linear", mat0_fp32 + mat1_fp32, {}),
]
self.nn_fp32 = [
("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}),
("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) +
(torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}),
]
self.torch_need_autocast_promote = [
("cat", (pointwise0_bf16 + pointwise1_fp32,)),
("stack", (pointwise0_bf16 + pointwise1_fp32,)),
]
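# Analogous sketch for the CPU lists (illustrative only, not part of the real
# test suite): under CPU autocast with bfloat16, `mm` from `torch_bf16` should
# produce a bfloat16 output.
def _demo_cpu_autocast_mm_bf16():
    lists = AutocastCPUTestLists('cpu')
    op, args = next((e[0], e[1]) for e in lists.torch_bf16 if e[0] == 'mm')
    with torch.autocast(device_type='cpu', dtype=torch.bfloat16):
        out = getattr(torch, op)(*args)
    assert out.dtype == torch.bfloat16
    return out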
| pytorch-master | torch/testing/_internal/autocast_test_lists.py |
# Owner(s): ["oncall: distributed"]
import functools
import itertools
import sys
from abc import ABC, abstractmethod
from contextlib import suppress
from copy import deepcopy
from enum import Enum, auto
from math import inf
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from unittest import mock
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed.fsdp import CPUOffload
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import (
BackwardPrefetch,
MixedPrecision,
ShardingStrategy,
TrainingState_,
)
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
from torch.distributed.fsdp.wrap import (
always_wrap_policy,
transformer_auto_wrap_policy,
wrap,
)
from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
TEST_SKIPS,
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import FILE_SCHEMA, get_cycles_per_ms
class FSDPInitMode(Enum):
# No FSDP wrapping
NO_FSDP = auto()
# FSDP recursive wrapping
RECURSIVE = auto()
# TODO: FSDP non-recursive wrapping
# NONRECURSIVE = auto()
class CUDAInitMode(Enum):
# Move model to CUDA before passing to the FSDP constructor
CUDA_BEFORE = auto()
# Move model to CUDA after passing to the FSDP constructor
CUDA_AFTER = auto()
# Keep on CPU
CUDA_NEVER = auto()
class FSDPTestModel(nn.Module, ABC):
"""This defines the interface expected from all models used commonly for
FSDP unit tests."""
@abstractmethod
def get_input(self, device) -> Tuple[torch.Tensor, ...]:
"""Returns an input for the model as as tuple."""
...
@abstractmethod
def get_loss(self, input, output) -> torch.Tensor:
"""Returns the loss given the input and output."""
...
@abstractmethod
def run_backward(self, loss) -> None:
"""Runs the backward pass (e.g. including ``loss.backward()``)."""
...
@staticmethod
@abstractmethod
def init(
group: dist.ProcessGroup,
fsdp_init_mode: FSDPInitMode,
*init_args: Any,
cuda_init_mode: CUDAInitMode,
fsdp_kwargs: Optional[Dict[str, Any]] = None,
deterministic: bool = False,
**init_kwargs: Any,
) -> nn.Module:
"""Initializes an instance of this model."""
...
def _assert_module_states(
model: nn.Module,
process_group: dist.ProcessGroup,
assert_fn: Callable,
):
"""
All-gathers module states across ranks and calls ``assert_fn`` on each pair
of corresponding states from rank 0 and a nonzero rank. For example, if
``assert_fn`` is ``self.assertEqual()``, then this checks that all module
states are equal across ranks.
"""
# Include names for debugging convenience
named_module_states = [
(param_name, param.detach().cpu())
for param_name, param in model.named_parameters()
]
named_module_states += [
(buffer_name, buffer.detach().cpu())
for buffer_name, buffer in model.named_buffers()
]
world_size = dist.get_world_size(process_group)
olist = [None for _ in range(world_size)]
dist.all_gather_object(olist, named_module_states, group=process_group)
rank0_states = olist[0]
for state in olist[1:]:
for (_, p1), (_, p2) in zip(rank0_states, state):
assert_fn(p1, p2)
def _zero_model(
model: nn.Module,
zero_buffers: bool = False,
):
"""Zeros the parameters and optionally buffers of ``model`` in place."""
with FSDP.summon_full_params(model):
for param in model.parameters():
with torch.no_grad():
param.zero_()
if zero_buffers:
for buffer in model.buffers():
with torch.no_grad():
buffer.zero_()
def _get_state_dict(model, cpu_offload=False, half=False):
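    """Returns the state dict of ``model``, moving it to GPU unless ``cpu_offload`` and optionally casting to half precision."""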
if not cpu_offload:
model = model.cuda()
if half:
model.half()
return model.state_dict()
def subtest_name(test_name_mapping, *args):
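    """Builds a subtest name by joining the mapped value of each arg (or "none" for ``None``) with underscores."""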
return '_'.join(
[test_name_mapping[str(s)] if s is not None else "none" for s in args]
)
def get_full_params(model: nn.Module, recurse: bool = True):
"""
Returns the full unsharded parameters of ``model``. Any FSDP-managed
parameters offloaded to CPU are moved to GPU in the returned list.
Args:
recurse (bool): If ``False``, only unshards the parameters immediate to
``model``; if ``True``, recurses through the module hierarchy
rooted at ``model``.
"""
with FSDP.summon_full_params(model, recurse=recurse):
return deepcopy(list(model.parameters()))
def _maybe_cuda(model: nn.Module, move_to_cuda: bool):
return model.cuda() if move_to_cuda else model
def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs):
return (
model if not wrap_fsdp
else FSDP(model, *args, **kwargs)
)
class DummyProcessGroup:
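    """A minimal stand-in for a process group that tracks rank and world size and mocks ``allreduce``."""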
def __init__(self, rank: int, size: int):
self._rank = rank
self._size = size
def rank(self) -> int:
return self._rank
def size(self) -> int:
return self._size
def allreduce(self, *args, **kwargs):
dist_wait = mock.Mock()
def get_future():
future = torch.futures.Future()
future.set_result(1)
return future
dist_wait.get_future = get_future
return dist_wait
class DeterministicModel(torch.nn.Module):
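    """A two-layer linear model with deterministic initialization, optionally wrapping the inner layer with FSDP."""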
def __init__(self, wrap_fsdp, cpu_offload=CPUOffload(offload_params=False)):
super().__init__()
# keep everything deterministic for model initialization
torch.manual_seed(0)
self.inner: Union[torch.nn.Linear, FSDP] = \
torch.nn.Linear(2, 2).cuda()
if wrap_fsdp:
self.inner = FSDP(self.inner, cpu_offload=cpu_offload)
self.outer = torch.nn.Linear(2, 2).cuda()
def forward(self, x):
y = self.inner(x)
return self.outer(y)
class TransformerWithSharedParams(FSDPTestModel):
def __init__(
self,
group: dist.ProcessGroup,
cuda_init_mode: CUDAInitMode,
add_bn: bool,
deterministic: bool,
):
super().__init__()
self.rank = group.rank()
self.world_size = group.size()
if deterministic:
torch.manual_seed(0)
d_vocab = 23
d_model = 16
self.embed_tokens = nn.Embedding(d_vocab, d_model)
self.transformer = nn.Transformer(
d_model=d_model,
num_encoder_layers=2,
num_decoder_layers=2,
dim_feedforward=8,
dropout=0.1,
)
self.output_proj = nn.Linear(d_model, d_vocab)
# share the embedding and output projection weights
self.output_proj.weight = self.embed_tokens.weight
self.register_buffer(
"vocab_bias", self.embed_tokens.weight.new_ones((d_model,))
)
self.register_buffer(
"long_buffer",
torch.zeros_like(self.vocab_bias, dtype=torch.long),
) # type: ignore[arg-type]
self.bs = 2
self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity()
if cuda_init_mode == CUDAInitMode.CUDA_BEFORE:
self = self.cuda()
if deterministic:
self.eval()
def get_input(self, device):
torch.manual_seed(1 + self.rank) # keep everything deterministic
src = torch.arange(12, device=device).view(6, self.bs) # T x B
tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs) # T x B
return (src, tgt)
def forward(self, src_ids, tgt_ids):
src = self.embed_tokens(src_ids)
src = src + self.vocab_bias + self.long_buffer.type_as(src) # type: ignore[operator]
tgt = self.embed_tokens(tgt_ids)
tgt = self.bn(tgt)
x = self.transformer(src, tgt)
return self.output_proj(x)
def get_loss(self, input, output):
_, tgt = input
return nn.functional.cross_entropy(
output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum"
)
def run_backward(self, loss):
loss.backward()
@staticmethod
def init(
group: dist.ProcessGroup,
fsdp_init_mode: FSDPInitMode,
cuda_init_mode: CUDAInitMode,
fsdp_kwargs: Optional[Dict[str, Any]] = None,
deterministic: bool = False,
add_bn: bool = True,
) -> Union[nn.Module, FSDP]:
"""
Initializes a :class:`TransformerWithSharedParams` instance.
Args:
fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
any modules with FSDP. If ``RECURSIVE``, then wraps with
top-level FSDP. By default, the top-level FSDP uses the
``transformer_auto_wrap_policy()`` for encoder and decoder
layers, but a different auto wrap policy may be specified via
``fsdp_kwargs``.
cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
forwarded to the FSDP constructor.
deterministic (bool): Whether to make the model deterministic
across constructions.
add_bn (bool): Whether to include batch norm in the model.
"""
if fsdp_kwargs is None:
fsdp_kwargs = {}
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
return TransformerWithSharedParams(group, cuda_init_mode, add_bn, deterministic)
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
# Default to the `transformer_auto_wrap_policy()`
if "auto_wrap_policy" not in fsdp_kwargs:
auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
TransformerEncoderLayer,
TransformerDecoderLayer,
},
)
else:
auto_wrap_policy = fsdp_kwargs.pop("auto_wrap_policy")
fsdp_model = FSDP(
TransformerWithSharedParams(group, cuda_init_mode, add_bn, deterministic),
group,
auto_wrap_policy=auto_wrap_policy,
**fsdp_kwargs,
)
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
fsdp_model = fsdp_model.cuda()
return fsdp_model
raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
def get_ignored_modules(self):
return [self.transformer]
class NestedWrappedModule(FSDPTestModel):
def __init__(
self,
group: dist.ProcessGroup,
wrap_fsdp: bool,
cuda_init_mode: CUDAInitMode,
deterministic: bool,
**fsdp_kwargs,
):
super().__init__()
self.rank = group.rank()
self.world_size = group.size()
move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
def _maybe_wrap(layer):
if wrap_fsdp:
return FSDP(layer, group, **fsdp_kwargs)
return layer
if deterministic:
torch.manual_seed(0)
self.module = nn.Sequential(
_maybe_cuda(nn.Linear(8, 4), move_to_cuda),
_maybe_wrap(
nn.Sequential(
_maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)),
_maybe_cuda(nn.Linear(16, 16), move_to_cuda),
),
),
_maybe_wrap(_maybe_cuda(nn.Linear(16, 4), move_to_cuda)),
_maybe_cuda(nn.Linear(4, 8), move_to_cuda),
)
def get_input(self, device):
torch.manual_seed(1 + self.rank) # keep everything deterministic
return (torch.rand(4, 8, device=device),)
def forward(self, x):
return self.module(x)
def get_loss(self, input, output):
loss = output.sum()
return loss
def run_backward(self, loss):
loss.backward()
@staticmethod
def init(
group: dist.ProcessGroup,
fsdp_init_mode: FSDPInitMode,
cuda_init_mode: CUDAInitMode,
fsdp_kwargs: Optional[Dict[str, Any]] = None,
deterministic: bool = False,
) -> nn.Module:
"""
Initializes a :class:`NestedWrappedModule` instance.
Args:
fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
any modules with FSDP. If ``RECURSIVE``, then wraps some nested
modules with FSDP but not the top-level module. The model may
later be wrapped with a top-level FSDP external to this method
if desired.
cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
forwarded to the FSDP constructor.
deterministic (bool): Whether to make the model deterministic
across constructions.
"""
if fsdp_kwargs is None:
fsdp_kwargs = {}
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
return NestedWrappedModule(
group,
wrap_fsdp=False,
cuda_init_mode=cuda_init_mode,
deterministic=deterministic,
)
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
# Does not wrap with top-level FSDP
fsdp_model = NestedWrappedModule(
group,
wrap_fsdp=True,
cuda_init_mode=cuda_init_mode,
deterministic=deterministic,
**fsdp_kwargs,
)
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
fsdp_model = fsdp_model.cuda()
return fsdp_model
raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
class AlwaysWrapNestedWrappedModule(NestedWrappedModule):
@staticmethod
def init(
group: dist.ProcessGroup,
fsdp_init_mode: FSDPInitMode,
cuda_init_mode: CUDAInitMode,
fsdp_kwargs: Optional[Dict[str, Any]] = None,
deterministic: bool = False,
):
"""
Initializes a :class:`NestedWrappedModule` instance, but unlike
:meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this
wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap
policy.
"""
super_ = super(AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule)
model = super_.init(
group=group,
fsdp_init_mode=FSDPInitMode.NO_FSDP,
cuda_init_mode=cuda_init_mode,
fsdp_kwargs=fsdp_kwargs,
deterministic=deterministic,
)
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
return model
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs)
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
fsdp_model = fsdp_model.cuda()
return fsdp_model
class ModuleWithDelay(FSDPTestModel):
"""This class wraps a :class:`FSDPTestModel` to optionally add a delay
after computing the loss and/or before the gradient reduction."""
def __init__(
self,
module: nn.Module,
delay_after_loss_ms: int,
delay_before_reduction_ms: int,
):
super().__init__()
self.delay_after_loss_ms = delay_after_loss_ms
self.delay_before_reduction_ms = delay_before_reduction_ms
self.module = module
def get_input(self, device):
return self.module.get_input(device)
def forward(self, x):
return self.module(x)
def get_loss(self, input, output):
loss = self.module.get_loss(input, output)
if self.delay_after_loss_ms > 0:
torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms()))
return loss
def run_backward(self, loss):
orig_reduce_scatter = torch.distributed._reduce_scatter_base
def _delayed_reduce_scatter(*args, **kwargs):
if self.delay_before_reduction_ms > 0:
torch.cuda._sleep(
int(self.delay_before_reduction_ms * get_cycles_per_ms())
)
return orig_reduce_scatter(*args, **kwargs)
with mock.patch(
"torch.distributed._reduce_scatter_base", _delayed_reduce_scatter
):
self.module.run_backward(loss)
@staticmethod
def init(
module_class: Type[FSDPTestModel],
*model_args: Any,
delay_after_loss_ms: int,
delay_before_reduction_ms: int,
**model_kwargs: Any,
):
"""
Args:
module_class (Type[FSDPTestModel]): Wrapped module class to which
to add delays.
model_args: Positional arguments forwarded to the ``module_class``
``init()``.
delay_after_loss_ms (int): Delay after computing the loss/before
the optimizer step (in ms).
delay_before_reduction_ms (int): Delay before reduce-scattering
gradients (in ms).
model_kwargs: Keyword arguments forwarded to the ``module_class``
``init()``.
"""
return ModuleWithDelay(
module_class.init(*model_args, **model_kwargs),
delay_after_loss_ms,
delay_before_reduction_ms,
)
class NestedWrappedModuleWithDelay(ModuleWithDelay):
@staticmethod
def init(
group: dist.ProcessGroup,
fsdp_init_mode: FSDPInitMode,
cuda_init_mode: CUDAInitMode = CUDAInitMode.CUDA_AFTER,
fsdp_kwargs: Optional[Dict[str, Any]] = None,
deterministic: bool = False,
delay_after_loss_ms: int = 0,
delay_before_reduction_ms: int = 0,
):
return super(NestedWrappedModuleWithDelay, NestedWrappedModuleWithDelay).init(
NestedWrappedModule,
group=group,
fsdp_init_mode=fsdp_init_mode,
cuda_init_mode=cuda_init_mode,
fsdp_kwargs=fsdp_kwargs,
deterministic=deterministic,
delay_after_loss_ms=delay_after_loss_ms,
delay_before_reduction_ms=delay_before_reduction_ms,
)
class DummyDDP(nn.Module):
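    """Forwards to the wrapped module without any data parallelism (a stand-in used in place of DDP)."""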
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
class MixtureOfExperts(NestedWrappedModule):
def __init__(
self,
group: dist.ProcessGroup,
wrap_fsdp: bool,
cuda_init_mode: CUDAInitMode,
delay_before_free_ms: int,
deterministic: bool,
**fsdp_kwargs,
):
super().__init__(
group=group,
wrap_fsdp=wrap_fsdp,
cuda_init_mode=cuda_init_mode,
deterministic=deterministic,
)
self.group = group
self.delay_before_free_ms = delay_before_free_ms
self.wrap_fsdp = wrap_fsdp
self.move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
if deterministic:
# Give each rank different expert parameters
torch.manual_seed(42 + self.rank)
d_expert = 23
d_shared = 12
d_input = 8
expert = _maybe_cuda(nn.Linear(d_expert, d_shared), self.move_to_cuda)
self.num_expert_params = sum([p.numel() for p in expert.parameters()])
for p in expert.parameters():
p.expert = True # type: ignore[attr-defined]
if deterministic:
# Keep all other parameters the same across ranks
torch.manual_seed(0)
shared = _maybe_cuda(nn.Linear(d_shared, d_expert), self.move_to_cuda)
if wrap_fsdp:
# we create a process group of size 1 for the expert params
expert_group = torch.distributed.new_group(
[group.rank()]
) # world size 1 means no shard
expert = FSDP(expert, expert_group, **fsdp_kwargs) # type: ignore[assignment]
shared = FSDP(shared, group, **fsdp_kwargs) # type: ignore[assignment]
self.module = nn.Sequential(
_maybe_cuda(nn.Linear(d_input, d_shared), self.move_to_cuda),
shared,
expert,
_maybe_cuda(nn.Linear(d_shared, d_input), self.move_to_cuda)
)
def forward(self, x):
if self.delay_before_free_ms > 0:
expert = self.module[2]
if isinstance(expert, FSDP):
orig_free_full_params = self.module[2]._free_full_params
def _free_full_params_with_delay(*args):
torch.cuda._sleep(
int(self.delay_before_free_ms * get_cycles_per_ms())
)
return orig_free_full_params(*args)
assert hasattr(
expert, "_free_full_params"
), "expert FSDP module should has _free_full_params attribute."
with mock.patch.object(
expert, "_free_full_params", _free_full_params_with_delay
):
return self.module(x)
return self.module(x)
def run_backward(self, loss):
loss.backward()
# Manually reduce gradients if not wrapped in FullyShardedDataParallel
if not self.wrap_fsdp:
with torch.no_grad():
for p in self.parameters():
if hasattr(p, "expert"):
continue # these params don't need grad reduction
p.grad.div_(self.world_size)
torch.distributed.all_reduce(p.grad, group=self.group)
@staticmethod
def init(
group: dist.ProcessGroup,
fsdp_init_mode: FSDPInitMode,
cuda_init_mode: CUDAInitMode,
fsdp_kwargs: Optional[Dict[str, Any]] = None,
deterministic: bool = False,
delay_before_free_ms: int = 0,
):
"""
Initializes a :class:`MixtureOfExperts` instance.
Args:
fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
any modules with FSDP. If ``RECURSIVE``, then wraps some nested
modules with FSDP, including the expert and shared layers, but
not the top-level module. The model may later be wrapped with a
top-level FSDP external to this method if desired.
cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
forwarded to the FSDP constructor.
deterministic (bool): Whether to make the model deterministic
across constructions.
delay_before_free_ms (int): Delay before resharding expert
parameters in the forward pass (in ms).
"""
if fsdp_kwargs is None:
fsdp_kwargs = {}
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
return MixtureOfExperts(
group,
wrap_fsdp=False,
cuda_init_mode=cuda_init_mode,
delay_before_free_ms=delay_before_free_ms,
deterministic=deterministic,
)
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
# Does not wrap with top-level FSDP
fsdp_model = MixtureOfExperts(
group,
wrap_fsdp=True,
cuda_init_mode=cuda_init_mode,
delay_before_free_ms=delay_before_free_ms,
deterministic=deterministic,
**fsdp_kwargs,
)
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
fsdp_model = fsdp_model.cuda()
return fsdp_model
raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
class FSDPTest(MultiProcessTestCase):
def setUp(self):
super(FSDPTest, self).setUp()
self._spawn_processes()
@property
def world_size(self):
return torch.cuda.device_count() if torch.cuda.is_available() else 4
@property
def process_group(self):
return dist.distributed_c10d._get_default_group()
@property
def init_method(self):
return "{}{file_name}".format(FILE_SCHEMA, file_name=self.file_name)
def _check_cpu_offload(self, fsdp_model, cpu_offload):
self.assertEqual(cpu_offload, fsdp_model.cpu_offload)
def _check_backward_prefetch(self, fsdp_model, backward_prefetch):
self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch)
def _check_forward_prefetch(self, fsdp_model, forward_prefetch):
self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch)
def run_subtests(
self,
subtest_config: Dict[str, List[Any]],
test_fn: Callable,
*test_args,
**test_kwargs: Any,
):
"""
Runs a test function given by ``test_fn`` as a subtest according to the
configurations specified by ``subtest_config``. This amortizes the
costly setup overhead (including process spawn and initializing the
process group) over the subtests.
Args:
subtest_config (Dict[str, List[Any]]): A mapping from subtest
keyword argument name to a list of its possible values.
test_fn (Callable): A callable that runs the actual test.
test_args: Positional arguments to pass to ``test_fn``.
test_kwargs: Keyword arguments to pass to ``test_fn``.
"""
# Convert the config mapping to a list to have a fixed order
subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items())
subtest_config_keys: List[str] = [item[0] for item in subtest_config_items]
subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items]
for values in itertools.product(*subtest_config_values):
# Map keyword to chosen value
subtest_kwargs = {
kwarg: value for kwarg, value in zip(subtest_config_keys, values)
}
with self.subTest(**subtest_kwargs):
test_fn(*test_args, **test_kwargs, **subtest_kwargs)
dist.barrier()
@classmethod
def _run(cls, rank, test_name, file_name, pipe):
self = cls(test_name)
self.rank = rank
self.file_name = file_name
print(f"dist init r={self.rank}, world={self.world_size}")
        # Specify the gloo backend to make 'init_process_group()' succeed;
        # actual tests will be skipped if there are not enough GPUs.
backend = "nccl" if torch.cuda.is_available() else "gloo"
try:
dist.init_process_group(
init_method=self.init_method,
backend=backend,
world_size=int(self.world_size),
rank=self.rank,
)
except RuntimeError as e:
if "recompile" in e.args[0]:
sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)
raise
if torch.cuda.is_available() and torch.cuda.device_count():
torch.cuda.set_device(self.rank % torch.cuda.device_count())
# Execute barrier prior to running test to ensure that every process
# has finished initialization and that the following test
# immediately exiting due to a skip doesn't cause flakiness.
dist.barrier()
self.run_test(test_name, pipe)
dist.barrier()
dist.destroy_process_group()
sys.exit(0)
def _train_for_several_steps(
self,
model: nn.Module,
num_steps: int,
autocast: bool,
lr: float = 0.01,
fsdp_cpu_offload: Optional[CPUOffload] = None,
norm_type: Optional[Union[float, int]] = None,
save_model: bool = False,
mixed_precision: Optional[MixedPrecision] = None,
enable_sharded_grad_scaler: bool = False,
use_pure_fp16: bool = False,
):
cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params
model_device = next(model.parameters()).device
sharded_grad_scaler = ShardedGradScaler(enabled=enable_sharded_grad_scaler)
        # Use SGD with momentum instead of Adam, since Adam is scale-invariant
        # and that makes it a poor choice for these tests
optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
for _ in range(num_steps):
optim.zero_grad()
with torch.cuda.amp.autocast(enabled=autocast):
                # Inputs are always on CUDA, regardless of CPU offloading or model.device
input = model.module.get_input(torch.device("cuda"))
if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)):
if isinstance(input, torch.Tensor):
input = input.half()
else:
input = tuple(x.half() for x in input)
output = model(*input)
                # Post-forward, if CPU offloading is enabled, model params should be on CPU.
if cpu_offload_params and isinstance(model, FSDP):
for p in model.parameters():
# Params should always be on CPU, even if
# p._is_sharded=False
self.assertEqual(p.device, torch.device("cpu"))
loss = model.module.get_loss(input, output).to(model_device)
loss = sharded_grad_scaler.scale(loss)
if not mixed_precision and not use_pure_fp16:
assert (
loss.dtype == torch.float32
), "loss data type should be float32, as the original \
parameter data type is float32."
else:
if use_pure_fp16:
self.assertEqual(loss.dtype, torch.float16)
# FSDP loss is fp16, DDP AMP loss is fp32
elif isinstance(model, FSDP):
self.assertEqual(loss.dtype, mixed_precision.param_dtype)
else:
self.assertEqual(loss.dtype, torch.float32)
model.module.run_backward(loss)
if norm_type is not None:
max_norm = 0.3
if isinstance(model, FSDP):
model.clip_grad_norm_(max_norm, norm_type)
total_norm_after_clip = _collect_total_grad_norm_fsdp(
model, norm_type, self.rank
)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, norm_type)
total_norm_after_clip = _collect_total_grad_norm_local(
model, norm_type
)
self.assertTrue(total_norm_after_clip <= max_norm)
            # Post-backward, if CPU offloading is enabled, model params should be on CPU.
if cpu_offload_params and isinstance(model, FSDP):
for p in model.parameters():
# Params should always be on CPU, even if
# p._is_sharded=False
self.assertEqual(p.device, torch.device("cpu"))
# Unscale the gradients and step
sharded_grad_scaler.step(optim)
# Update the scale factor
sharded_grad_scaler.update()
# if save_model, simulate save + load.
if save_model:
state_dict = {k: v.clone() for k, v in model.state_dict().items()}
                # Zero the params; if saving/loading the state_dict did not work
                # properly, this would break the parity test with DDP.
_zero_model(model)
model.load_state_dict(state_dict)
if isinstance(model, FSDP):
model._assert_state(TrainingState_.IDLE)
return loss.detach()
def _test_fsdp_parity(
self,
model_class: Type[FSDPTestModel],
fsdp_init_mode: FSDPInitMode,
cuda_init_mode: CUDAInitMode,
ref_init_fn: Optional[Callable] = None,
num_iters: int = 2,
save_model: bool = True,
cpu_offload: CPUOffload = CPUOffload(),
backward_prefetch: Optional[BackwardPrefetch] = None,
forward_prefetch: bool = False,
sharding_strategy: Optional[ShardingStrategy] = None,
mixed_precision: Optional[MixedPrecision] = None,
enable_sharded_grad_scaler: bool = False,
use_pure_fp16: bool = False,
norm_type: Optional[Union[float, int]] = None,
init_kwargs: Optional[Dict[str, Any]] = None,
**fsdp_kwargs,
):
"""
Tests FSDP training against a reference, which defaults to DDP but
may be customized with ``ref_init_fn``.
Args:
model_class (Type[FSDPTestModel]): A model class that inherits from
``FSDPTestModel``, which defines the expected interface.
fsdp_init_mode (FSDPInitMode): The mode to initialize the
FSDP-wrapped model. This should not be ``NO_FSDP``.
ref_init_fn (Optional[Callable]): A callable to invoke that wraps a
non-wrapped model to construct the reference model, where this
wrapper should provide data parallel semantics. If ``None``,
then the callable defaults to the DDP constructor.
"""
assert fsdp_init_mode != FSDPInitMode.NO_FSDP, "Expects an FSDP init mode that wraps with FSDP"
if init_kwargs is None:
init_kwargs = {}
lr = 1e-2
rank = self.process_group.rank()
# Establish reference behavior with DDP
model = model_class.init(
self.process_group,
FSDPInitMode.NO_FSDP,
CUDAInitMode.CUDA_BEFORE,
deterministic=True,
**init_kwargs,
)
if ref_init_fn is None:
ref_model = DDP(model, device_ids=[rank], output_device=rank)
else:
ref_model = ref_init_fn(model)
if use_pure_fp16:
ref_model = ref_model.half()
ref_loss = self._train_for_several_steps(
ref_model,
num_iters,
autocast=mixed_precision is not None,
lr=lr,
fsdp_cpu_offload=cpu_offload,
mixed_precision=mixed_precision,
norm_type=norm_type,
enable_sharded_grad_scaler=enable_sharded_grad_scaler,
use_pure_fp16=use_pure_fp16,
)
ddp_params = list(ref_model.parameters())
# Check against FSDP behavior
fsdp_kwargs.update(
{
"cpu_offload": cpu_offload,
"backward_prefetch": backward_prefetch,
"forward_prefetch": forward_prefetch,
"sharding_strategy": sharding_strategy,
"mixed_precision": mixed_precision,
}
)
try:
fsdp_model = model_class.init(
self.process_group,
fsdp_init_mode,
cuda_init_mode,
fsdp_kwargs,
deterministic=True,
**init_kwargs,
)
except Exception as e:
raise ValueError(f"Initializing {model_class} raised error {str(e)}")
if not isinstance(fsdp_model, FSDP):
            # Enforce wrapping with top-level FSDP since the comparison assumes a
            # data parallel reference, and some test models may not wrap at the
            # top level in their `init()` method
fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs)
if use_pure_fp16:
# Change the model parameter dtype after FSDP initialization
fsdp_model = fsdp_model.half()
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
fsdp_model = fsdp_model.cuda()
offload_params = cpu_offload is not None and cpu_offload.offload_params
# Offloading parameters with `CUDA_AFTER` should raise an error during
# lazy initialization due to the parameter devices not being CPU;
# otherwise, all parameter devices should be CPU
expects_device_error = offload_params and cuda_init_mode == CUDAInitMode.CUDA_AFTER
expects_cpu_device = offload_params and cuda_init_mode != CUDAInitMode.CUDA_AFTER
if expects_cpu_device:
cpu_device = torch.device("cpu")
for param in fsdp_model.parameters():
self.assertEqual(param.device, cpu_device)
context = (
self.assertRaisesRegex(AssertionError, "Expected param to be on CPU")
if expects_device_error else suppress()
)
with context:
fsdp_loss = self._train_for_several_steps(
fsdp_model,
num_iters,
autocast=False,
lr=lr,
fsdp_cpu_offload=cpu_offload,
save_model=save_model,
mixed_precision=mixed_precision,
norm_type=norm_type,
enable_sharded_grad_scaler=enable_sharded_grad_scaler,
use_pure_fp16=use_pure_fp16,
)
# No need to check for parameter and loss parity if expecting an error
if expects_device_error:
return
# Check parameter devices are CPU if offloading to CPU before calling
# `get_full_params()`, which will cast the parameters to FP32
if offload_params:
for param in fsdp_model.parameters():
self.assertEqual(param.device, cpu_device)
fsdp_loss = fsdp_loss.cuda()
fsdp_unsharded_params = get_full_params(fsdp_model)
torch.testing.assert_allclose(ref_loss, fsdp_loss)
# Do not check for parameter parity if using mixed precision since (1)
# the DDP parameters are in FP16 (from `half()`) while the FSDP
# parameters are in FP32 (from `summon_full_params()`) and (2) DDP runs
# the optimizer in FP16 while FSDP runs it in FP32
        if mixed_precision is not None:
            return
        self.assertEqual(
            ddp_params,
            fsdp_unsharded_params,
            exact_device=True,
            msg="FSDP did not match DDP",
        )
class SkipModule(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(10, 10, bias=False)
def forward(self, x):
return self.lin(x)
class NestedLinear(nn.Module):
def __init__(self, fsdp_wrap):
super().__init__()
if fsdp_wrap:
self.nested_linear = wrap(nn.Linear(10, 10, bias=False).cuda())
else:
self.nested_linear = nn.Linear(10, 10, bias=False).cuda()
def forward(self, x):
return self.nested_linear(x)
class SkipModel(nn.Module):
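    """A model whose ``linear_skip`` submodule is left unwrapped by FSDP, while ``nested_linear`` is wrapped via ``wrap()``."""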
def __init__(self, double_nest):
super().__init__()
self.linear = nn.Linear(10, 10, bias=False).cuda()
self.linear_skip = SkipModule().cuda()
self.nested_linear = wrap(NestedLinear(fsdp_wrap=double_nest))
def forward(self, x):
x = self.linear(x)
x = self.linear_skip(x)
x = self.nested_linear(x)
return x
def _collect_total_grad_norm_fsdp(model, norm_type, rank):
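    """Computes the total gradient norm of ``model`` across all ranks for an FSDP-sharded model."""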
total_norm = _collect_total_grad_norm_local(model, norm_type)
op = torch.distributed.ReduceOp.SUM
if norm_type == inf:
op = torch.distributed.ReduceOp.MAX
norm_type = 1.0
return_norm = torch.tensor(total_norm ** norm_type, device=rank)
dist.all_reduce(return_norm, op=op)
return return_norm ** (1.0 / norm_type)
def _collect_total_grad_norm_local(model, norm_type):
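    """Computes the total gradient norm over the local parameters of ``model``."""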
if norm_type == inf:
return max(p.grad.abs().max() for p in model.parameters())
else:
total_norm = 0.0
for p in model.parameters():
local_norm = torch.linalg.vector_norm(p.grad, norm_type, dtype=torch.float32)
total_norm += local_norm ** norm_type
return total_norm ** (1.0 / norm_type)
| pytorch-master | torch/testing/_internal/common_fsdp.py |
import copy
import gc
import inspect
import runpy
import threading
from collections import namedtuple
from enum import Enum
from functools import wraps
from typing import List, Any, ClassVar, Optional, Sequence, Tuple, Union, Dict, Set
import unittest
import os
import torch
from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, \
skipCUDANonDefaultStreamIf, TEST_WITH_ASAN, TEST_WITH_UBSAN, TEST_WITH_TSAN, \
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, IS_WINDOWS, DeterministicGuard, \
_TestParametrizer, compose_parametrize_fns, dtype_name, \
TEST_WITH_MIOPEN_SUGGEST_NHWC, NATIVE_DEVICES
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_dtype import get_all_dtypes
# The implementation should be moved here as soon as the deprecation period is over.
from torch.testing._legacy import get_all_device_types # noqa: F401
try:
import psutil # type: ignore[import]
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
# Note [Writing Test Templates]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# PyTorch has its own framework for instantiating test templates. That is, for
# taking test classes that look similar to unittest or pytest
# compatible test classes and optionally doing the following:
#
# - instantiating a version of the test class for each available device type
# (often the CPU, CUDA, and META device types)
# - further instantiating a version of each test that's always specialized
# on the test class's device type, and optionally specialized further
# on datatypes or operators
#
# This functionality is similar to pytest's parametrize functionality
# (see https://docs.pytest.org/en/6.2.x/parametrize.html), but with considerable
# additional logic that specializes the instantiated test classes for their
# device types (see CPUTestBase and CUDATestBase below), supports a variety
# of composable decorators that allow for test filtering and setting
# tolerances, and allows tests parametrized by operators to instantiate
# only the subset of device type x dtype that operator supports.
#
# This framework was built to make it easier to write tests that run on
# multiple device types, multiple datatypes (dtypes), and for multiple
# operators. It's also useful for controlling which tests are run. For example,
# only tests that use a CUDA device can be run on platforms with CUDA.
# Let's dive in with an example to get an idea for how it works:
#
# --------------------------------------------------------
# A template class (looks like a regular unittest TestCase)
# class TestClassFoo(TestCase):
#
# # A template test that can be specialized with a device
#   # NOTE: this test case is not runnable by unittest or pytest because it
#   # accepts an extra positional argument, "device", which they do not understand
# def test_bar(self, device):
# pass
#
# # Function that instantiates a template class and its tests
# instantiate_device_type_tests(TestClassFoo, globals())
# --------------------------------------------------------
#
# In the above code example we see a template class and a single test template
# that can be instantiated with a device. The function
# instantiate_device_type_tests(), called at file scope, instantiates
# new test classes, one per available device type, and new tests in those
# classes from these templates. It actually does this by removing
# the class TestClassFoo and replacing it with classes like TestClassFooCPU
# and TestClassFooCUDA, instantiated test classes that inherit from CPUTestBase
# and CUDATestBase respectively. Additional device types, like XLA,
# (see https://github.com/pytorch/xla) can further extend the set of
# instantiated test classes to create classes like TestClassFooXLA.
#
# The test template, test_bar(), is also instantiated. In this case the template
# is only specialized on a device, so (depending on the available device
# types) it might become test_bar_cpu() in TestClassFooCPU and test_bar_cuda()
# in TestClassFooCUDA. We can think of the instantiated test classes as
# looking like this:
#
# --------------------------------------------------------
# # An instantiated test class for the CPU device type
# class TestClassFooCPU(CPUTestBase):
#
# # An instantiated test that calls the template with the string representation
# # of a device from the test class's device type
# def test_bar_cpu(self):
# test_bar(self, 'cpu')
#
# # An instantiated test class for the CUDA device type
# class TestClassFooCUDA(CUDATestBase):
#
# # An instantiated test that calls the template with the string representation
# # of a device from the test class's device type
# def test_bar_cuda(self):
# test_bar(self, 'cuda:0')
# --------------------------------------------------------
#
# These instantiated test classes are discoverable and runnable by both
# unittest and pytest. One thing that may be confusing, however, is that
# attempting to run "test_bar" will not work, despite it appearing in the
# original template code. This is because "test_bar" is no longer discoverable
# after instantiate_device_type_tests() runs, as the above snippet shows.
# Instead "test_bar_cpu" and "test_bar_cuda" may be run directly, or both
# can be run with the option "-k test_bar".
#
# Removing the template class and adding the instantiated classes requires
# passing "globals()" to instantiate_device_type_tests(), because it
# edits the file's Python objects.
#
# As mentioned, tests can be additionally parametrized on dtypes or
# operators. Datatype parametrization uses the @dtypes decorator and
# requires a test template like this:
#
# --------------------------------------------------------
# # A template test that can be specialized with a device and a datatype (dtype)
# @dtypes(torch.float32, torch.int64)
# def test_car(self, device, dtype):
# pass
# --------------------------------------------------------
#
# If the CPU and CUDA device types are available this test would be
# instantiated as 4 tests that cover the cross-product of the two dtypes
# and two device types:
#
# - test_car_cpu_float32
# - test_car_cpu_int64
# - test_car_cuda_float32
# - test_car_cuda_int64
#
# The dtype is passed as a torch.dtype object.
#
# Tests parametrized on operators (actually on OpInfos, more on that in a
# moment...) use the @ops decorator and require a test template like this:
# --------------------------------------------------------
# # A template test that can be specialized with a device, dtype, and OpInfo
# @ops(op_db)
# def test_car(self, device, dtype, op):
# pass
# --------------------------------------------------------
#
# See the documentation for the @ops decorator below for additional details
# on how to use it and see the note [OpInfos] in
# common_methods_invocations.py for more details on OpInfos.
#
# A test parametrized over the entire "op_db", which contains hundreds of
# OpInfos, will likely have hundreds or thousands of instantiations. The
# test will be instantiated on the cross-product of device types, operators,
# and the dtypes the operator supports on that device type. The instantiated
# tests will have names like:
#
# - test_car_add_cpu_float32
# - test_car_sub_cuda_int64
#
# The first instantiated test calls the original test_car() with the OpInfo
# for torch.add as its "op" argument, the string 'cpu' for its "device" argument,
# and the dtype torch.float32 for its "dtype" argument. The second instantiated
# test calls the test_car() with the OpInfo for torch.sub, a CUDA device string
# like 'cuda:0' or 'cuda:1' for its "device" argument, and the dtype
# torch.int64 for its "dtype" argument.
#
# Clever test filtering can be very useful when working with parametrized
# tests. "-k test_car" would run every instantiated variant of the test_car()
# test template, and "-k test_car_add" runs every variant instantiated with
# torch.add.
#
# It is important to use the passed device and dtype as appropriate. Use
# helper functions like make_tensor() that require explicitly specifying
# the device and dtype so they're not forgotten.
#
# Test templates can use a variety of composable decorators to specify
# additional options and requirements. Some are listed here, with a short
# usage sketch after the list:
#
# - @deviceCountAtLeast(<minimum number of devices to run test with>)
# Passes a list of strings representing all available devices of
# the test class's device type as the test template's "device" argument.
#     If there are fewer devices than the value passed to the decorator,
# the test is skipped.
# - @dtypes(<list of tuples of dtypes>)
# In addition to accepting multiple dtypes, the @dtypes decorator
# can accept a sequence of tuple pairs of dtypes. The test template
# will be called with each tuple for its "dtype" argument.
# - @onlyNativeDeviceTypes
# Skips the test if the device is not a native device type (currently CPU, CUDA, Meta)
# - @onlyCPU
# Skips the test if the device is not a CPU device
# - @onlyCUDA
# Skips the test if the device is not a CUDA device
# - @skipCPUIfNoLapack
# Skips the test if the device is a CPU device and LAPACK is not installed
# - @skipCPUIfNoMkl
# Skips the test if the device is a CPU device and MKL is not installed
# - @skipCUDAIfNoMagma
# Skips the test if the device is a CUDA device and MAGMA is not installed
# - @skipCUDAIfRocm
# Skips the test if the device is a CUDA device and ROCm is being used
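#
# A short hypothetical sketch combining a few of the decorators above (the
# class and test names are illustrative only, not taken from a real test file):
#
# --------------------------------------------------------
# class TestClassQux(TestCase):
#
#   @onlyCUDA
#   @dtypes(torch.float32, torch.float64)
#   def test_qux(self, device, dtype):
#     t = torch.ones(3, device=device, dtype=dtype)
#     self.assertEqual(t.sum().item(), 3)
#
# instantiate_device_type_tests(TestClassQux, globals())
# --------------------------------------------------------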
# Note [Adding a Device Type]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# To add a device type:
#
# (1) Create a new "TestBase" extending DeviceTypeTestBase.
# See CPUTestBase and CUDATestBase below.
# (2) Define the "device_type" attribute of the base to be the
# appropriate string.
# (3) Add logic to this file that appends your base class to
# device_type_test_bases when your device type is available.
# (4) (Optional) Write setUpClass/tearDownClass class methods that
# instantiate dependencies (see MAGMA in CUDATestBase).
# (5) (Optional) Override the "instantiate_test" method for total
# control over how your class creates tests.
#
# setUpClass is called AFTER tests have been created and BEFORE and ONLY IF
# they are run. This makes it useful for initializing devices and dependencies.
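#
# A minimal hypothetical sketch of steps (1)-(3) (the 'foo' device name and the
# availability check are illustrative only):
#
# --------------------------------------------------------
# class FooTestBase(DeviceTypeTestBase):
#   device_type = 'foo'
#
# if foo_backend_is_available():  # hypothetical availability check
#   device_type_test_bases.append(FooTestBase)
# --------------------------------------------------------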
# Note [Overriding methods in generic tests]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Device generic tests look a lot like normal test classes, but they differ
# from ordinary classes in some important ways. In particular, overriding
# methods in generic tests doesn't work quite the way you expect.
#
# class TestFooDeviceType(TestCase):
# # Intention is to override
# def assertEqual(self, x, y):
# # This DOESN'T WORK!
# super(TestFooDeviceType, self).assertEqual(x, y)
#
# If you try to run this code, you'll get an error saying that TestFooDeviceType
# is not in scope. This is because after instantiating our classes, we delete
# it from the parent scope. Instead, you need to hardcode a direct invocation
# of the desired subclass call, e.g.,
#
# class TestFooDeviceType(TestCase):
# # Intention is to override
# def assertEqual(self, x, y):
#     TestCase.assertEqual(self, x, y)
#
# However, a less error-prone way of customizing the behavior of TestCase
# is to either (1) add your functionality to TestCase and make it toggled
# by a class attribute, or (2) create your own subclass of TestCase, and
# then inherit from it for your generic test.
def _dtype_test_suffix(dtypes):
""" Returns the test suffix for a dtype, sequence of dtypes, or None. """
if isinstance(dtypes, list) or isinstance(dtypes, tuple):
if len(dtypes) == 0:
return ''
return '_' + '_'.join((dtype_name(d) for d in dtypes))
elif dtypes:
return '_{}'.format(dtype_name(dtypes))
else:
return ''
def _update_param_kwargs(param_kwargs, name, value):
""" Adds a kwarg with the specified name and value to the param_kwargs dict. """
if isinstance(value, list) or isinstance(value, tuple):
# Make name plural (e.g. devices / dtypes) if the value is composite.
param_kwargs['{}s'.format(name)] = value
elif value is not None:
param_kwargs[name] = value
# Leave param_kwargs as-is when value is None.
class DeviceTypeTestBase(TestCase):
device_type: str = 'generic_device_type'
# Flag to disable test suite early due to unrecoverable error such as CUDA error.
_stop_test_suite = False
# Precision is a thread-local setting since it may be overridden per test
_tls = threading.local()
_tls.precision = TestCase._precision
_tls.rel_tol = TestCase._rel_tol
@property
def precision(self):
return self._tls.precision
@precision.setter
def precision(self, prec):
self._tls.precision = prec
@property
def rel_tol(self):
return self._tls.rel_tol
@rel_tol.setter
def rel_tol(self, prec):
self._tls.rel_tol = prec
# Returns a string representing the device that single device tests should use.
# Note: single device tests use this device exclusively.
@classmethod
def get_primary_device(cls):
return cls.device_type
# Returns a list of strings representing all available devices of this
# device type. The primary device must be the first string in the list
# and the list must contain no duplicates.
# Note: UNSTABLE API. Will be replaced once PyTorch has a device generic
# mechanism of acquiring all available devices.
@classmethod
def get_all_devices(cls):
return [cls.get_primary_device()]
# Returns the dtypes the test has requested.
# Prefers device-specific dtype specifications over generic ones.
@classmethod
def _get_dtypes(cls, test):
if not hasattr(test, 'dtypes'):
return None
default_dtypes = test.dtypes.get('all')
msg = f"@dtypes is mandatory when using @dtypesIf however '{test.__name__}' didn't specify it"
assert default_dtypes is not None, msg
return test.dtypes.get(cls.device_type, default_dtypes)
def _get_precision_override(self, test, dtype):
if not hasattr(test, 'precision_overrides'):
return self.precision
return test.precision_overrides.get(dtype, self.precision)
def _get_tolerance_override(self, test, dtype):
if not hasattr(test, 'tolerance_overrides'):
return self.precision, self.rel_tol
return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol))
def _apply_precision_override_for_test(self, test, param_kwargs):
dtype = param_kwargs['dtype'] if 'dtype' in param_kwargs else None
dtype = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else dtype
if dtype:
self.precision = self._get_precision_override(test, dtype)
self.precision, self.rel_tol = self._get_tolerance_override(test, dtype)
# Creates device-specific tests.
@classmethod
def instantiate_test(cls, name, test, *, generic_cls=None):
def instantiate_test_helper(cls, name, *, test, param_kwargs=None):
# Constructs the test
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
# Add the device param kwarg if the test needs device or devices.
param_kwargs = {} if param_kwargs is None else param_kwargs
test_sig_params = inspect.signature(test).parameters
if 'device' in test_sig_params or 'devices' in test_sig_params:
device_arg: str = cls.get_primary_device()
if hasattr(test, 'num_required_devices'):
device_arg = cls.get_all_devices()
_update_param_kwargs(param_kwargs, 'device', device_arg)
# Sets precision and runs test
# Note: precision is reset after the test is run
guard_precision = self.precision
guard_rel_tol = self.rel_tol
try:
self._apply_precision_override_for_test(test, param_kwargs)
result = test(self, **param_kwargs)
except RuntimeError as rte:
# check if rte should stop entire test suite.
self._stop_test_suite = self._should_stop_test_suite()
# Check if test has been decorated with `@expectedFailure`
# Using `__unittest_expecting_failure__` attribute, see
# https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164
# In that case, make it fail with "unexpected success" by suppressing exception
if getattr(test, "__unittest_expecting_failure__", False) and self._stop_test_suite:
import sys
print("Suppressing fatal exception to trigger unexpected success", file=sys.stderr)
return
# raise the runtime error as is for the test suite to record.
raise rte
finally:
self.precision = guard_precision
self.rel_tol = guard_rel_tol
return result
assert not hasattr(cls, name), "Redefinition of test {0}".format(name)
setattr(cls, name, instantiated_test)
def default_parametrize_fn(test, generic_cls, device_cls):
# By default, no parametrization is needed.
yield (test, '', {})
# Parametrization decorators set the parametrize_fn attribute on the test.
parametrize_fn = test.parametrize_fn if hasattr(test, 'parametrize_fn') else default_parametrize_fn
# If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it.
dtypes = cls._get_dtypes(test)
if dtypes is not None:
def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes):
for dtype in dtypes:
param_kwargs: Dict[str, Any] = {}
_update_param_kwargs(param_kwargs, "dtype", dtype)
# Note that an empty test suffix is set here so that the dtype can be appended
# later after the device.
yield (test, '', param_kwargs)
parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn)
# Instantiate the parametrized tests.
for (test, test_suffix, param_kwargs) in parametrize_fn(test, generic_cls, cls):
test_suffix = '' if test_suffix == '' else '_' + test_suffix
device_suffix = '_' + cls.device_type
# Note: device and dtype suffix placement
# Special handling here to place dtype(s) after device according to test name convention.
dtype_kwarg = None
if 'dtype' in param_kwargs or 'dtypes' in param_kwargs:
dtype_kwarg = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else param_kwargs['dtype']
test_name = '{}{}{}{}'.format(name, test_suffix, device_suffix, _dtype_test_suffix(dtype_kwarg))
instantiate_test_helper(cls=cls, name=test_name, test=test, param_kwargs=param_kwargs)
def run(self, result=None):
super().run(result=result)
# Early terminate test if _stop_test_suite is set.
if self._stop_test_suite:
result.stop()
class CPUTestBase(DeviceTypeTestBase):
device_type = 'cpu'
# No critical error should stop CPU test suite
def _should_stop_test_suite(self):
return False
class CUDATestBase(DeviceTypeTestBase):
device_type = 'cuda'
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
primary_device: ClassVar[str]
cudnn_version: ClassVar[Any]
no_magma: ClassVar[bool]
no_cudnn: ClassVar[bool]
def has_cudnn(self):
return not self.no_cudnn
@classmethod
def get_primary_device(cls):
return cls.primary_device
@classmethod
def get_all_devices(cls):
primary_device_idx = int(cls.get_primary_device().split(':')[1])
num_devices = torch.cuda.device_count()
prim_device = cls.get_primary_device()
cuda_str = 'cuda:{0}'
non_primary_devices = [cuda_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx]
return [prim_device] + non_primary_devices
@classmethod
def setUpClass(cls):
# has_magma shows up after cuda is initialized
t = torch.ones(1).cuda()
cls.no_magma = not torch.cuda.has_magma
# Determines if cuDNN is available and its version
cls.no_cudnn = not torch.backends.cudnn.is_acceptable(t)
cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version()
# Acquires the current device as the primary (test) device
cls.primary_device = 'cuda:{0}'.format(torch.cuda.current_device())
# See Note [Lazy Tensor tests in device agnostic testing]
lazy_ts_backend_init = False
class LazyTestBase(DeviceTypeTestBase):
device_type = 'lazy'
def _should_stop_test_suite(self):
return False
@classmethod
def setUpClass(cls):
import torch._lazy
import torch._lazy.metrics
import torch._lazy.ts_backend
global lazy_ts_backend_init
if not lazy_ts_backend_init:
# Need to connect the TS backend to lazy key before running tests
torch._lazy.ts_backend.init()
lazy_ts_backend_init = True
class MPSTestBase(DeviceTypeTestBase):
device_type = 'mps'
def _should_stop_test_suite(self):
return False
# TODO: Maybe override `_get_dtypes`, `_get_precision_override`
# Adds available device-type-specific test base classes
def get_device_type_test_bases():
# set type to List[Any] due to mypy list-of-union issue:
# https://github.com/python/mypy/issues/3351
test_bases: List[Any] = list()
if IS_SANDCASTLE or IS_FBCODE:
if IS_REMOTE_GPU:
# Skip if sanitizer is enabled
if not TEST_WITH_ASAN and not TEST_WITH_TSAN and not TEST_WITH_UBSAN:
test_bases.append(CUDATestBase)
else:
test_bases.append(CPUTestBase)
else:
test_bases.append(CPUTestBase)
if torch.cuda.is_available():
test_bases.append(CUDATestBase)
# Disable MPS testing in generic device testing temporarily while we're
# ramping up support.
# elif torch.backends.mps.is_available():
# test_bases.append(MPSTestBase)
return test_bases
device_type_test_bases = get_device_type_test_bases()
def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None):
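    """Returns the subset of ``device_type_test_bases`` allowed by the ``except_for`` and ``only_for`` filters."""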
# device type cannot appear in both except_for and only_for
intersect = set(except_for if except_for else []) & set(only_for if only_for else [])
assert not intersect, f"device ({intersect}) appeared in both except_for and only_for"
if except_for:
device_type_test_bases = filter(
lambda x: x.device_type not in except_for, device_type_test_bases)
if only_for:
device_type_test_bases = filter(
lambda x: x.device_type in only_for, device_type_test_bases)
return list(device_type_test_bases)
# Note [How to extend DeviceTypeTestBase to add new test device]
# The following logic optionally allows downstream projects like pytorch/xla to
# add more test devices.
# Instructions:
# - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project.
# - Inside the file, one should inherit from `DeviceTypeTestBase` class and define
# a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of
# `instantiate_test` method.
# - DO NOT import common_device_type inside the file.
# `runpy.run_path` with `globals()` already properly setup the context so that
# `DeviceTypeTestBase` is already available.
# - Set a top-level variable `TEST_CLASS` equal to your new class.
# E.g. TEST_CLASS = XLATensorBase
# - To run tests with the new device type, set the `TORCH_TEST_DEVICES` env variable to the path
# to this file. Multiple paths can be separated by `:`.
# See pytorch/xla/test/pytorch_test_base.py for a more detailed example.
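#
# A minimal hypothetical sketch of such a downstream file (the class name and
# device string are illustrative only):
#
# --------------------------------------------------------
# # my_project/pytorch_test_base.py, loaded via the TORCH_TEST_DEVICES env variable
# # NOTE: DeviceTypeTestBase is available through the globals that
# # runpy.run_path() injects; do not import common_device_type here.
# class MyAcceleratorTestBase(DeviceTypeTestBase):
#   device_type = 'myaccel'
#
# TEST_CLASS = MyAcceleratorTestBase
# --------------------------------------------------------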
_TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None)
if _TORCH_TEST_DEVICES:
for path in _TORCH_TEST_DEVICES.split(':'):
# runpy (a stdlib module) lacks annotations
mod = runpy.run_path(path, init_globals=globals()) # type: ignore[func-returns-value]
device_type_test_bases.append(mod['TEST_CLASS'])
PYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1'
PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY = 'PYTORCH_TESTING_DEVICE_ONLY_FOR'
PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY = 'PYTORCH_TESTING_DEVICE_EXCEPT_FOR'
# Adds 'instantiated' device-specific test cases to the given scope.
# The tests in these test cases are derived from the generic tests in
# generic_test_class.
# See Note [Writing Test Templates] above for details.
def instantiate_device_type_tests(generic_test_class, scope, except_for=None, only_for=None, include_lazy=False):
# Removes the generic test class from its enclosing scope so its tests
# are not discoverable.
del scope[generic_test_class.__name__]
# Creates an 'empty' version of the generic_test_class
# Note: we don't inherit from the generic_test_class directly because
# that would add its tests to our test classes and they would be
# discovered (despite not being runnable). Inherited methods also
# can't be removed later, and we can't rely on load_tests because
# pytest doesn't support it (as of this writing).
empty_name = generic_test_class.__name__ + "_base"
empty_class = type(empty_name, generic_test_class.__bases__, {})
# Acquires members names
# See Note [Overriding methods in generic tests]
generic_members = set(generic_test_class.__dict__.keys()) - set(empty_class.__dict__.keys())
generic_tests = [x for x in generic_members if x.startswith('test')]
# Filter out the device types based on user inputs
desired_device_type_test_bases = filter_desired_device_types(device_type_test_bases,
except_for, only_for)
if include_lazy:
# Note [Lazy Tensor tests in device agnostic testing]
# Right now, test_view_ops.py runs with LazyTensor.
# We don't want to opt every device-agnostic test into using the lazy device,
# because many of them will fail.
# So instead, the only way to opt a specific device-agnostic test file into
# lazy tensor testing is with include_lazy=True
desired_device_type_test_bases.append(LazyTestBase)
def split_if_not_empty(x: str):
return x.split(",") if len(x) != 0 else []
# Filter out the device types based on environment variables if available
# Usage:
# export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu
# export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla
env_only_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, ''))
env_except_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, ''))
desired_device_type_test_bases = filter_desired_device_types(desired_device_type_test_bases,
env_except_for, env_only_for)
# Creates device-specific test cases
for base in desired_device_type_test_bases:
# Special-case for ROCm testing -- only test for 'cuda' i.e. ROCm device by default
# The except_for and only_for cases were already checked above. At this point we only need to check 'cuda'.
if TEST_WITH_ROCM and base.device_type != 'cuda':
continue
class_name = generic_test_class.__name__ + base.device_type.upper()
        # type set to Any and suppressed due to unsupported runtime class creation:
# https://github.com/python/mypy/wiki/Unsupported-Python-Features
device_type_test_class: Any = type(class_name, (base, empty_class), {})
for name in generic_members:
if name in generic_tests: # Instantiates test member
test = getattr(generic_test_class, name)
                # XLA-compat shim (XLA's instantiate_test doesn't take generic_cls)
sig = inspect.signature(device_type_test_class.instantiate_test)
if len(sig.parameters) == 3:
# Instantiates the device-specific tests
device_type_test_class.instantiate_test(name, copy.deepcopy(test), generic_cls=generic_test_class)
else:
device_type_test_class.instantiate_test(name, copy.deepcopy(test))
else: # Ports non-test member
assert name not in device_type_test_class.__dict__, "Redefinition of directly defined member {0}".format(name)
nontest = getattr(generic_test_class, name)
setattr(device_type_test_class, name, nontest)
# Mimics defining the instantiated class in the caller's file
# by setting its module to the given class's and adding
# the module to the given scope.
# This lets the instantiated class be discovered by unittest.
device_type_test_class.__module__ = generic_test_class.__module__
scope[class_name] = device_type_test_class
# Category of dtypes to run an OpInfo-based test for
# Example use: @ops(dtypes=OpDTypes.supported)
#
# There are 6 categories:
# - supported: Every dtype supported by the operator. Use for exhaustive
# testing of all dtypes.
# - unsupported: Run tests on dtypes not supported by the operator, e.g. for
#     testing that the operator raises an error and doesn't crash.
# - supported_backward: Every dtype supported by the operator's backward pass.
# - unsupported_backward: Run tests on dtypes not supported by the operator's backward pass.
# - any_one: Runs a test for one dtype the operator supports. Prioritizes dtypes the
# operator supports in both forward and backward.
# - none: Useful for tests that are not dtype-specific. No dtype will be passed to the test
# when this is selected.
class OpDTypes(Enum):
supported = 0 # Test all supported dtypes (default)
unsupported = 1 # Test only unsupported dtypes
supported_backward = 2 # Test all supported backward dtypes
unsupported_backward = 3 # Test only unsupported backward dtypes
any_one = 4 # Test precisely one supported dtype
none = 5 # Instantiate no dtype variants (no dtype kwarg needed)
# Decorator that defines the OpInfos a test template should be instantiated for.
#
# Example usage:
#
# @ops(unary_ufuncs)
# def test_numerics(self, device, dtype, op):
# <test_code>
#
# This will instantiate variants of test_numerics for each given OpInfo,
# on each device the OpInfo's operator supports, and for every dtype supported by
# that operator. There are a few caveats to the dtype rule, explained below.
#
# The @ops decorator can accept two
# additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified
# then the test variants are instantiated for those dtypes, regardless of
# what the operator supports. If given "allowed_dtypes" then test variants
# are instantiated only for the intersection of allowed_dtypes and the dtypes
# they would otherwise be instantiated with. That is, allowed_dtypes composes
# with the options listed above and below.
#
# The "dtypes" argument can also accept additional values (see OpDTypes above):
# OpDTypes.supported - the test is instantiated for all dtypes the operator
# supports
# OpDTypes.unsupported - the test is instantiated for all dtypes the operator
# doesn't support
# OpDTypes.supported_backward - the test is instantiated for all dtypes the
# operator's gradient formula supports
# OpDTypes.unsupported_backward - the test is instantiated for all dtypes the
# operator's gradient formula doesn't support
# OpDTypes.any_one - the test is instantiated for one dtype the
# operator supports. The dtype supports forward and backward if possible.
# OpDTypes.none - the test is instantiated without any dtype. The test signature
# should not include a dtype kwarg in this case.
#
# These options allow tests to have considerable control over the dtypes
# they're instantiated for.
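#
# Example (illustrative; assumes an `op_db` list of OpInfos is in scope):
#
#   @ops(op_db, dtypes=OpDTypes.supported_backward, allowed_dtypes=(torch.float32,))
#   def test_fn_grad(self, device, dtype, op):
#       ...
#
# Here each variant is instantiated only for torch.float32, and only when the
# operator's gradient formula supports that dtype on the given device.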
class ops(_TestParametrizer):
def __init__(self, op_list, *, dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported,
allowed_dtypes: Optional[Sequence[torch.dtype]] = None):
self.op_list = list(op_list)
self.opinfo_dtypes = dtypes
self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None
def _parametrize_test(self, test, generic_cls, device_cls):
""" Parameterizes the given test function across each op and its associated dtypes. """
if device_cls is None:
raise RuntimeError('The @ops decorator is only intended to be used in a device-specific '
'context; use it with instantiate_device_type_tests() instead of '
'instantiate_parametrized_tests()')
op = check_exhausted_iterator = object()
for op in self.op_list:
# Determine the set of dtypes to use.
dtypes: Union[Set[torch.dtype], Set[None]]
if isinstance(self.opinfo_dtypes, Sequence):
dtypes = set(self.opinfo_dtypes)
elif self.opinfo_dtypes == OpDTypes.unsupported_backward:
dtypes = set(get_all_dtypes()).difference(op.supported_backward_dtypes(device_cls.device_type))
elif self.opinfo_dtypes == OpDTypes.supported_backward:
dtypes = op.supported_backward_dtypes(device_cls.device_type)
elif self.opinfo_dtypes == OpDTypes.unsupported:
dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(device_cls.device_type))
elif self.opinfo_dtypes == OpDTypes.supported:
dtypes = op.supported_dtypes(device_cls.device_type)
elif self.opinfo_dtypes == OpDTypes.any_one:
# Arbitrary order
dtype_order = (
torch.float32,
torch.float64,
torch.complex64,
torch.complex128,
torch.float16,
torch.bfloat16,
torch.long,
torch.int32,
torch.int16,
torch.int8,
torch.uint8,
torch.bool
)
# Tries to pick a dtype that supports both forward and backward
supported = op.supported_dtypes(device_cls.device_type)
supported_backward = op.supported_backward_dtypes(device_cls.device_type)
supported_both = supported.intersection(supported_backward)
dtype_set = supported_both if len(supported_both) > 0 else supported
for dtype in dtype_order:
if dtype in dtype_set:
dtypes = {dtype}
break
else:
dtypes = {}
elif self.opinfo_dtypes == OpDTypes.none:
dtypes = {None}
else:
raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}")
if self.allowed_dtypes is not None:
dtypes = dtypes.intersection(self.allowed_dtypes)
# Construct the test name; device / dtype parts are handled outside.
# See [Note: device and dtype suffix placement]
test_name = op.formatted_name
for dtype in dtypes:
# Construct parameter kwargs to pass to the test.
param_kwargs = {'op': op}
_update_param_kwargs(param_kwargs, 'dtype', dtype)
# Wraps instantiated test with op decorators
# NOTE: test_wrapper exists because we don't want to apply
# op-specific decorators to the original test.
# Test-specific decorators are applied to the original test,
# however.
try:
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
for decorator in op.get_decorators(
generic_cls.__name__, test.__name__, device_cls.device_type, dtype):
test_wrapper = decorator(test_wrapper)
yield (test_wrapper, test_name, param_kwargs)
except Exception as ex:
# Provides an error message for debugging before rethrowing the exception
print("Failed to instantiate {0} for op {1}!".format(test_name, op.name))
raise ex
if op is check_exhausted_iterator:
raise ValueError('An empty op_list was passed to @ops. '
'Note that this may result from reuse of a generator.')
# Decorator that skips a test if the given condition is true.
# Notes:
# (1) Skip conditions stack.
# (2) Skip conditions can be bools or strings. If a string, the
# test base must have defined the corresponding attribute to be False
# for the test to run. If you want to use a string argument you should
# probably define a new decorator instead (see below).
# (3) Prefer the existing decorators to defining the 'device_type' kwarg.
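# Example usage (the conditions here are illustrative):
#
#   @skipIf(IS_WINDOWS, "flaky on Windows")  # bool condition
#   def test_foo(self, device):
#       ...
#
#   @skipIf('no_magma', "no MAGMA library detected", device_type='cuda')  # string condition
#   def test_bar(self, device):
#       ...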
class skipIf(object):
def __init__(self, dep, reason, device_type=None):
self.dep = dep
self.reason = reason
self.device_type = device_type
def __call__(self, fn):
@wraps(fn)
def dep_fn(slf, *args, **kwargs):
if self.device_type is None or self.device_type == slf.device_type:
if (isinstance(self.dep, str) and getattr(slf, self.dep, True)) or (isinstance(self.dep, bool) and self.dep):
raise unittest.SkipTest(self.reason)
return fn(slf, *args, **kwargs)
return dep_fn
# Skips a test on CPU if the condition is true.
class skipCPUIf(skipIf):
def __init__(self, dep, reason):
super().__init__(dep, reason, device_type='cpu')
# Skips a test on CUDA if the condition is true.
class skipCUDAIf(skipIf):
def __init__(self, dep, reason):
super().__init__(dep, reason, device_type='cuda')
# Skips a test on Meta if the condition is true.
class skipMetaIf(skipIf):
def __init__(self, dep, reason):
super().__init__(dep, reason, device_type='meta')
# Skips a test on XLA if the condition is true.
class skipXLAIf(skipIf):
def __init__(self, dep, reason):
super().__init__(dep, reason, device_type='xla')
def _has_sufficient_memory(device, size):
if torch.device(device).type == 'cuda':
if not torch.cuda.is_available():
return False
gc.collect()
torch.cuda.empty_cache()
# torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU
if device == 'cuda':
device = 'cuda:0'
return torch.cuda.memory.mem_get_info(device)[0] >= size
if device == 'xla':
raise unittest.SkipTest('TODO: Memory availability checks for XLA?')
if device != 'cpu':
raise unittest.SkipTest('Unknown device type')
# CPU
if not HAS_PSUTIL:
raise unittest.SkipTest('Need psutil to determine if memory is sufficient')
# The sanitizers have significant memory overheads
if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN:
effective_size = size * 10
else:
effective_size = size
if psutil.virtual_memory().available < effective_size:
gc.collect()
return psutil.virtual_memory().available >= effective_size
def largeTensorTest(size, device=None):
"""Skip test if the device has insufficient memory to run the test
size may be a number of bytes, a string of the form "N GB", or a callable
If the test is a device generic test, available memory on the primary device will be checked.
It can also be overriden by the optional `device=` argument.
In other tests, the `device=` argument needs to be specified.
"""
if isinstance(size, str):
assert size.endswith("GB") or size.endswith("gb"), "only bytes or GB supported"
size = 1024 ** 3 * int(size[:-2])
def inner(fn):
@wraps(fn)
def dep_fn(self, *args, **kwargs):
size_bytes = size(self, *args, **kwargs) if callable(size) else size
_device = device if device is not None else self.get_primary_device()
if not _has_sufficient_memory(_device, size_bytes):
raise unittest.SkipTest('Insufficient {} memory'.format(_device))
return fn(self, *args, **kwargs)
return dep_fn
return inner
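# Example usage (the sizes here are illustrative):
#
#   @largeTensorTest('12GB')
#   def test_huge(self, device):
#       ...
#
#   @largeTensorTest(2 ** 32, device='cpu')  # explicit device for non-device-generic tests
#   def test_big_cpu(self):
#       ...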
class expectedFailure(object):
def __init__(self, device_type):
self.device_type = device_type
def __call__(self, fn):
@wraps(fn)
def efail_fn(slf, *args, **kwargs):
if self.device_type is None or self.device_type == slf.device_type:
try:
fn(slf, *args, **kwargs)
except Exception:
return
else:
slf.fail('expected test to fail, but it passed')
return fn(slf, *args, **kwargs)
return efail_fn
class onlyOn(object):
def __init__(self, device_type):
self.device_type = device_type
def __call__(self, fn):
@wraps(fn)
def only_fn(slf, *args, **kwargs):
if self.device_type != slf.device_type:
reason = "Only runs on {0}".format(self.device_type)
raise unittest.SkipTest(reason)
return fn(slf, *args, **kwargs)
return only_fn
# Decorator that provides all available devices of the device type to the test
# as a list of strings instead of providing a single device string.
# Skips the test if the number of available devices of the variant's device
# type is less than the 'num_required_devices' arg.
class deviceCountAtLeast(object):
def __init__(self, num_required_devices):
self.num_required_devices = num_required_devices
def __call__(self, fn):
assert not hasattr(fn, 'num_required_devices'), "deviceCountAtLeast redefinition for {0}".format(fn.__name__)
fn.num_required_devices = self.num_required_devices
@wraps(fn)
def multi_fn(slf, devices, *args, **kwargs):
if len(devices) < self.num_required_devices:
reason = "fewer than {0} devices detected".format(self.num_required_devices)
raise unittest.SkipTest(reason)
return fn(slf, devices, *args, **kwargs)
return multi_fn
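# Example usage:
#
#   @deviceCountAtLeast(2)
#   def test_multi_device(self, devices):
#       # `devices` is a list of device strings, e.g. ['cuda:0', 'cuda:1']
#       ...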
# Only runs the test on the native device type (currently CPU, CUDA, Meta)
def onlyNativeDeviceTypes(fn):
@wraps(fn)
def only_fn(self, *args, **kwargs):
if self.device_type not in NATIVE_DEVICES:
reason = "onlyNativeDeviceTypes: doesn't run on {0}".format(self.device_type)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return only_fn
# Specifies per-dtype precision overrides.
# Ex.
#
# @precisionOverride({torch.half : 1e-2, torch.float : 1e-4})
# @dtypes(torch.half, torch.float, torch.double)
# def test_X(self, device, dtype):
# ...
#
# When the test is instantiated its class's precision will be set to the
# corresponding override, if it exists.
# self.precision can be accessed directly, and it also controls the behavior of
# functions like self.assertEqual().
#
# Note that self.precision is a scalar value, so if you require multiple
# precisions (or are working with multiple dtypes) they should be specified
# explicitly and computed using self.precision (e.g.
# self.precision * 2, max(1, self.precision)).
class precisionOverride(object):
def __init__(self, d):
assert isinstance(d, dict), "precisionOverride not given a dtype : precision dict!"
for dtype, prec in d.items():
assert isinstance(dtype, torch.dtype), "precisionOverride given unknown dtype {0}".format(dtype)
self.d = d
def __call__(self, fn):
fn.precision_overrides = self.d
return fn
# Specifies per-dtype tolerance overrides tol(atol, rtol). It has priority over
# precisionOverride.
# Ex.
#
# @toleranceOverride({torch.float : tol(atol=1e-2, rtol=1e-3),
#                     torch.double : tol(atol=1e-4, rtol=0)})
# @dtypes(torch.half, torch.float, torch.double)
# def test_X(self, device, dtype):
# ...
#
# When the test is instantiated its class's tolerance will be set to the
# corresponding override, if it exists.
# self.rtol and self.precision can be accessed directly, and they also control
# the behavior of functions like self.assertEqual().
#
# The above example sets atol = 1e-2 and rtol = 1e-3 for torch.float and
# atol = 1e-4 and rtol = 0 for torch.double.
tol = namedtuple('tol', ['atol', 'rtol'])
class toleranceOverride(object):
def __init__(self, d):
assert isinstance(d, dict), "toleranceOverride not given a dtype : tol dict!"
for dtype, prec in d.items():
assert isinstance(dtype, torch.dtype), "toleranceOverride given unknown dtype {0}".format(dtype)
assert isinstance(prec, tol), "toleranceOverride not given a dtype : tol dict!"
self.d = d
def __call__(self, fn):
fn.tolerance_overrides = self.d
return fn
# Decorator that instantiates a variant of the test for each given dtype.
# Notes:
# (1) Tests that accept the dtype argument MUST use this decorator.
# (2) Can be overridden for the CPU or CUDA, respectively, using dtypesIfCPU
# or dtypesIfCUDA.
# (3) Can accept an iterable of dtypes or an iterable of tuples
# of dtypes.
# Examples:
# @dtypes(torch.float32, torch.float64)
# @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
class dtypes(object):
def __init__(self, *args, device_type="all"):
if len(args) > 0 and isinstance(args[0], (list, tuple)):
for arg in args:
assert isinstance(arg, (list, tuple)), \
"When one dtype variant is a tuple or list, " \
"all dtype variants must be. " \
"Received non-list non-tuple dtype {0}".format(str(arg))
assert all(isinstance(dtype, torch.dtype) for dtype in arg), "Unknown dtype in {0}".format(str(arg))
else:
assert all(isinstance(arg, torch.dtype) for arg in args), "Unknown dtype in {0}".format(str(args))
self.args = args
self.device_type = device_type
def __call__(self, fn):
d = getattr(fn, 'dtypes', {})
assert self.device_type not in d, "dtypes redefinition for {0}".format(self.device_type)
d[self.device_type] = self.args
fn.dtypes = d
return fn
# Overrides specified dtypes on the CPU.
class dtypesIfCPU(dtypes):
def __init__(self, *args):
super().__init__(*args, device_type='cpu')
# Overrides specified dtypes on CUDA.
class dtypesIfCUDA(dtypes):
def __init__(self, *args):
super().__init__(*args, device_type='cuda')
class dtypesIfMPS(dtypes):
def __init__(self, *args):
super().__init__(*args, device_type='mps')
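# Example of composing the generic and per-device dtype overrides
# (the dtypes chosen here are illustrative):
#
#   @dtypes(torch.float32)
#   @dtypesIfCUDA(torch.float32, torch.float16)
#   def test_baz(self, device, dtype):
#       ...
#
# On CPU the test runs only for float32; on CUDA it also runs for float16.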
def onlyCPU(fn):
return onlyOn('cpu')(fn)
def onlyCUDA(fn):
return onlyOn('cuda')(fn)
def disablecuDNN(fn):
@wraps(fn)
def disable_cudnn(self, *args, **kwargs):
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
return fn(self, *args, **kwargs)
return fn(self, *args, **kwargs)
return disable_cudnn
def disableMkldnn(fn):
@wraps(fn)
def disable_mkldnn(self, *args, **kwargs):
if torch.backends.mkldnn.is_available():
with torch.backends.mkldnn.flags(enabled=False):
return fn(self, *args, **kwargs)
return fn(self, *args, **kwargs)
return disable_mkldnn
def expectedFailureCUDA(fn):
return expectedFailure('cuda')(fn)
def expectedFailureMeta(fn):
return expectedFailure('meta')(fn)
def expectedFailureXLA(fn):
return expectedFailure('xla')(fn)
# This decorator checks that the decorated function produces a nondeterministic
# alert for the expected device types
class expectedAlertNondeterministic:
# Args:
#
# caller_name (str): Name of the operation that produces the
# nondeterministic alert. This name is expected to appear
# in the error/warning message.
#
# device_types (list[str], optional): If provided, the alert is
# expected to only be triggered for the specified devices, and
# no others. If None, then the alert is expected to be triggered
# for all devices. Default: None
#
def __init__(self, caller_name, device_types=None):
if device_types is not None:
assert isinstance(device_types, list)
for device_type in device_types:
assert isinstance(device_type, str)
self.device_types = device_types
self.error_message = caller_name + ' does not have a deterministic implementation, but you set'
def __call__(self, fn):
@wraps(fn)
def efail_fn(slf, device, *args, **kwargs):
should_alert = self.device_types is None or slf.device_type in self.device_types
# Check that errors are thrown correctly
with DeterministicGuard(True):
if should_alert:
with slf.assertRaisesRegex(
RuntimeError,
self.error_message,
msg='expected a non-deterministic error, but it was not raised'):
fn(slf, device, *args, **kwargs)
else:
# If a nondeterministic error is not expected, make sure
# that it is not raised
try:
return fn(slf, device, *args, **kwargs)
except RuntimeError as e:
if 'does not have a deterministic implementation' in str(e):
slf.fail(
'did not expect non-deterministic error message, '
+ 'but got one anyway: "' + str(e) + '"')
# Reraise exceptions unrelated to nondeterminism
raise
# Check that warnings are thrown correctly
if should_alert:
with DeterministicGuard(True, warn_only=True):
with slf.assertWarnsRegex(
UserWarning,
self.error_message):
fn(slf, device, *args, **kwargs)
return efail_fn
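# Example usage (the operation and caller name are illustrative):
#
#   @expectedAlertNondeterministic('my_op_backward', device_types=['cuda'])
#   def test_my_op_nondeterministic_alert(self, device):
#       ...  # invoke the operation expected to raise/warn about nondeterminism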
# Skips a test on CPU if LAPACK is not available.
def skipCPUIfNoLapack(fn):
return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn)
# Skips a test on CPU if FFT is not available.
def skipCPUIfNoFFT(fn):
return skipCPUIf(not torch._C.has_spectral, "PyTorch is built without FFT support")(fn)
# Skips a test on CPU if MKL is not available.
def skipCPUIfNoMkl(fn):
return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn)
# Skips a test on CPU if MKL Sparse is not available (it's not linked on Windows).
def skipCPUIfNoMklSparse(fn):
return skipCPUIf(IS_WINDOWS or not TEST_MKL, "PyTorch is built without MKL support")(fn)
# Skips a test on CPU if mkldnn is not available.
def skipCPUIfNoMkldnn(fn):
return skipCPUIf(not torch.backends.mkldnn.is_available(), "PyTorch is built without mkldnn support")(fn)
# Skips a test on CUDA if MAGMA is not available.
def skipCUDAIfNoMagma(fn):
return skipCUDAIf('no_magma', "no MAGMA library detected")(skipCUDANonDefaultStreamIf(True)(fn))
def has_cusolver():
version = _get_torch_cuda_version()
# cuSolver is disabled on CUDA < 10.1.243; the version tuple only carries major.minor, so require at least (10, 2)
return version >= (10, 2)
# Skips a test on CUDA if cuSOLVER is not available
def skipCUDAIfNoCusolver(fn):
return skipCUDAIf(not has_cusolver(), "cuSOLVER not available")(fn)
# Skips a test if both cuSOLVER and MAGMA are not available
def skipCUDAIfNoMagmaAndNoCusolver(fn):
if has_cusolver():
return fn
else:
# cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
return skipCUDAIfNoMagma(fn)
# Skips a test on CUDA when using ROCm.
def skipCUDAIfRocm(fn):
return skipCUDAIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")(fn)
# Skips a test on CUDA when not using ROCm.
def skipCUDAIfNotRocm(fn):
return skipCUDAIf(not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack")(fn)
# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipCUDAIfRocmVersionLessThan(version=None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if self.device_type == 'cuda':
if not TEST_WITH_ROCM:
reason = "ROCm not available"
raise unittest.SkipTest(reason)
rocm_version = str(torch.version.hip)
rocm_version = rocm_version.split("-")[0] # ignore git sha
rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
# Skips a test on CUDA when MIOpen's suggest-NHWC behavior (TEST_WITH_MIOPEN_SUGGEST_NHWC) is not enabled.
def skipCUDAIfNotMiopenSuggestNHWC(fn):
return skipCUDAIf(not TEST_WITH_MIOPEN_SUGGEST_NHWC, "test doesn't currently work without MIOpen NHWC activation")(fn)
# Skips a test for specified CUDA versions, given in the form of a list of [major, minor]s.
def skipCUDAVersionIn(versions : List[Tuple[int, int]] = None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
version = _get_torch_cuda_version()
if version == (0, 0): # cpu
return fn(self, *args, **kwargs)
if version in (versions or []):
reason = "test skipped for CUDA version {0}".format(version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
# Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested.
def skipCUDAIfCudnnVersionLessThan(version=0):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if self.device_type == 'cuda':
if self.no_cudnn:
reason = "cuDNN not available"
raise unittest.SkipTest(reason)
if self.cudnn_version is None or self.cudnn_version < version:
reason = "cuDNN version {0} is available but {1} required".format(self.cudnn_version, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
# Skips a test on CUDA if cuSparse generic API is not available
def skipCUDAIfNoCusparseGeneric(fn):
return skipCUDAIf(not TEST_CUSPARSE_GENERIC, "cuSparse Generic API not available")(fn)
def skipCUDAIfNoCudnn(fn):
return skipCUDAIfCudnnVersionLessThan(0)(fn)
def skipCUDAIfMiopen(fn):
return skipCUDAIf(torch.version.hip is not None, "Marked as skipped for MIOpen")(fn)
def skipCUDAIfNoMiopen(fn):
return skipCUDAIf(torch.version.hip is None, "MIOpen is not available")(skipCUDAIfNoCudnn(fn))
def skipMeta(fn):
return skipMetaIf(True, "test doesn't work with meta tensors")(fn)
def skipXLA(fn):
return skipXLAIf(True, "Marked as skipped for XLA")(fn)
| pytorch-master | torch/testing/_internal/common_device_type.py |
import faulthandler
import logging
import multiprocessing
import os
import sys
import tempfile
import threading
import subprocess
import time
import traceback
import types
import unittest
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from functools import (
partial,
reduce,
wraps
)
from io import StringIO
from typing import NamedTuple, Optional, Union
import torch
import torch.cuda.nccl
import torch.distributed as c10d
from torch.testing._internal.common_utils import (
TestCase,
TEST_WITH_ROCM,
TEST_WITH_TSAN,
FILE_SCHEMA,
find_free_port,
retry_on_connect_failures,
IS_SANDCASTLE,
sandcastle_skip_if,
sandcastle_skip,
)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class TestSkip(NamedTuple):
exit_code: int
message: str
TEST_SKIPS = {
"backend_unavailable": TestSkip(
72, "Skipped because distributed backend is not available."
),
"small_worldsize": TestSkip(73, "Skipped due to small world size."),
"odd_worldsize": TestSkip(87, "Skipped due to odd world size."),
"no_cuda": TestSkip(74, "CUDA is not available."),
"multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"),
"multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"),
"multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"),
"multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"),
"multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"),
"multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"),
"multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"),
"multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"),
"nccl": TestSkip(76, "c10d not compiled with NCCL support"),
"skipIfRocm": TestSkip(78, "Test skipped for ROCm"),
"no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"),
"generic": TestSkip(
86, "Test skipped at subprocess level, look at subprocess log for skip reason"
),
}
@dataclass
class DistTestCases:
# Backends that do not support a specific collective
skip_collective = {}
skip_collective["allgather_coalesced"] = {"nccl", "mpi"}
skip_collective["reduce"] = set()
skip_collective["sendrecv anysource"] = {"nccl"}
skip_collective["cpu barrier"] = {"nccl"}
# Sets showing that something is implemented
backend_feature = {}
backend_feature["gpu"] = {"nccl", "gloo"}
backend_feature["cuda"] = {"nccl", "gloo"}
backend_feature["ddp"] = {"nccl", "gloo"}
backend_feature["subgroup"] = {"nccl", "gloo"}
backend_feature["plugin"] = set()
def skip_if_no_gpu(func):
"""Skips if the world size exceeds the number of GPUs, ensuring that if the
test is run, each rank has its own GPU via ``torch.cuda.device(rank)``."""
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.cuda.is_available():
sys.exit(TEST_SKIPS["no_cuda"].exit_code)
world_size = int(os.environ["WORLD_SIZE"])
if torch.cuda.device_count() < world_size:
sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code)
return func(*args, **kwargs)
return wrapper
def skip_if_small_worldsize(func):
@wraps(func)
def wrapper(*args, **kwargs):
if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2:
sys.exit(TEST_SKIPS["small_worldsize"].exit_code)
return func(*args, **kwargs)
return wrapper
def skip_if_odd_worldsize(func):
@wraps(func)
def wrapper(*args, **kwargs):
if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) % 2 == 1:
sys.exit(TEST_SKIPS["odd_worldsize"].exit_code)
return func(*args, **kwargs)
return wrapper
def require_n_gpus_for_nccl_backend(n, backend):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if backend == "nccl" and torch.cuda.device_count() < n:
sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code)
else:
return func(*args, **kwargs)
return wrapper
return decorator
def skip_if_lt_x_gpu(x):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if torch.cuda.is_available() and torch.cuda.device_count() >= x:
return func(*args, **kwargs)
sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)
return wrapper
return decorator
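# Example usage:
#
#   @skip_if_lt_x_gpu(2)
#   def test_allreduce_two_gpus(self):
#       ...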
# This decorator helps avoiding initializing cuda while testing other backends
def nccl_skip_if_lt_x_gpu(backend, x):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if backend != "nccl":
return func(*args, **kwargs)
if torch.cuda.is_available() and torch.cuda.device_count() >= x:
return func(*args, **kwargs)
sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)
return wrapper
return decorator
def verify_ddp_error_logged(model_DDP, err_substr):
# Verify error was logged in ddp_logging_data.
ddp_logging_data = model_DDP._get_ddp_logging_data()
assert "iteration" in ddp_logging_data
assert "has_error" in ddp_logging_data
assert "error" in ddp_logging_data
logging_err = ddp_logging_data["error"]
# Remove C++ stacktrace if needed.
actual = (
err_substr if err_substr.find("\nException raised from ") == -1
else err_substr.split("\nException raised from ")[0]
)
assert actual in logging_err, f"Did not find expected {actual} in ddp logging data error: {logging_err}"
def with_nccl_blocking_wait(func):
"""
Convenience decorator to set/unset NCCL_BLOCKING_WAIT flag. Note that use of
this decorator will override the setting of NCCL_ASYNC_ERROR_HANDLING for
the particular test. After the test, both NCCL_BLOCKING_WAIT and
NCCL_ASYNC_ERROR_HANDLING will be restored to their original values.
"""
@wraps(func)
def wrapper(*args, **kwargs):
# Save and unset NCCL_ASYNC_ERROR_HANDLING
try:
cached_nccl_async_error_handling: Union[str, None] = os.environ[
"NCCL_ASYNC_ERROR_HANDLING"
]
del os.environ["NCCL_ASYNC_ERROR_HANDLING"]
except KeyError:
# NCCL_ASYNC_ERROR_HANDLING was unset
cached_nccl_async_error_handling = None
# Save val of NCCL_BLOCKING_WAIT and set it.
try:
cached_nccl_blocking_wait: Union[str, None] = os.environ[
"NCCL_BLOCKING_WAIT"
]
except KeyError:
cached_nccl_blocking_wait = None
finally:
os.environ["NCCL_BLOCKING_WAIT"] = "1"
try:
ret = func(*args, **kwargs)
return ret
finally:
# restore old values.
if cached_nccl_async_error_handling is not None:
os.environ[
"NCCL_ASYNC_ERROR_HANDLING"
] = cached_nccl_async_error_handling
if cached_nccl_blocking_wait is not None:
os.environ["NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait
return wrapper
def with_dist_debug_levels(levels):
"""
Runs a test for each distributed debug level specified in levels.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None)
for level in levels:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = level
c10d.set_debug_level_from_env()
ret = func(*args, **kwargs)
c10d.barrier()
if old_level is not None:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level
# Only returns test return for last test, but since these are
# unittests the return value is not really used and earlier tests
# would've raised had they failed.
return ret
return wrapper
return decorator
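# Example usage (valid TORCH_DISTRIBUTED_DEBUG levels are OFF, INFO, and DETAIL):
#
#   @with_dist_debug_levels(levels=["INFO", "DETAIL"])
#   def test_ddp_with_debug_logging(self):
#       ...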
def requires_gloo():
return sandcastle_skip_if(
not c10d.is_gloo_available(),
"c10d was not compiled with the Gloo backend",
)
def requires_nccl_version(version, msg):
if not c10d.is_nccl_available():
return sandcastle_skip(
"c10d was not compiled with the NCCL backend",
)
else:
return sandcastle_skip_if(
torch.cuda.nccl.version() < version,
"Requires NCCL version greater than or equal to: {}, found: {}, reason: {}".format(
version, torch.cuda.nccl.version(), msg
),
)
def requires_nccl():
return sandcastle_skip_if(
not c10d.is_nccl_available(),
"c10d was not compiled with the NCCL backend",
)
def requires_mpi():
return sandcastle_skip_if(
not c10d.is_mpi_available(),
"c10d was not compiled with the MPI backend",
)
def skip_if_rocm(func):
"""Skips a test for ROCm"""
func.skip_if_rocm = True
@wraps(func)
def wrapper(*args, **kwargs):
if not TEST_WITH_ROCM:
return func(*args, **kwargs)
sys.exit(TEST_SKIPS["skipIfRocm"].exit_code)
return wrapper
def skip_if_win32():
return sandcastle_skip_if(
sys.platform == 'win32',
"This unit test case is not supportted on Windows platform",
)
@retry_on_connect_failures
def create_tcp_store(
addr="localhost",
world_size=1,
is_master=True,
timeout=timedelta(minutes=5),
wait_for_workers=True,
jit_class=False,
):
"""
Creates a TCP store. Retries if the chosen port is already in use.
"""
port = find_free_port()
if jit_class:
timeout_millisecond = int(timeout / timedelta(milliseconds=1))
return torch.classes.dist_c10d.TCPStore(
addr, port, world_size, is_master, timeout_millisecond
)
else:
return c10d.TCPStore(
addr, port, world_size, is_master, wait_for_workers=wait_for_workers
)
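# Example usage (illustrative single-process setup):
#
#   store = create_tcp_store()
#   c10d.init_process_group("gloo", store=store, rank=0, world_size=1)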
if TEST_WITH_TSAN:
# TSAN runs much slower.
TIMEOUT_DEFAULT = 500
else:
TIMEOUT_DEFAULT = 100
TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400}
# https://github.com/pytorch/pytorch/issues/75665
if TEST_WITH_ROCM:
TIMEOUT_OVERRIDE["test_join_kwargs"] = 200
def create_device(interface=None):
if sys.platform == "win32" or interface is None:
return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1")
else:
return c10d.ProcessGroupGloo.create_device(interface=interface)
def get_timeout(test_id) -> int:
return TIMEOUT_OVERRIDE.get(test_id.split(".")[-1], TIMEOUT_DEFAULT)
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1):
"""
Generate a number of basic test cases for sparse reduction.
These cover tensors with a varying number of sparse dimensions and a varying
number of dense dimensions. The only reduction operation we support is sum.
"""
def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0):
# First sparse dimension is [0..rank].
# Subsequent dimensions are always 0, so we know there is
# a non-empty intersection between any two sparse tensors.
indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
shape = [world_size] + [2 for _ in range(dense_dims)]
for _ in range(sparse_dims - 1):
indices = torch.cat((indices, torch.zeros(1, rank + 1)))
shape.append(world_size)
values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
return torch.sparse_coo_tensor(indices, values, shape)
def compute_sum(fn, world_size: int):
return reduce(
lambda a, b: a + b, [fn(rank, world_size) for rank in range(world_size)]
)
return [
(
[
fn(num_inputs * rank + i, num_inputs * world_size)
for i in range(num_inputs)
],
[compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)],
)
for fn in [
partial(generate, sparse_dims=1),
partial(generate, sparse_dims=2),
partial(generate, sparse_dims=3),
partial(generate, dense_dims=1),
partial(generate, dense_dims=2),
partial(generate, dense_dims=3),
]
]
# HELPER FOR MULTIGPU TESTS
def init_multigpu_helper(world_size: int, backend: str):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
nGPUs = torch.cuda.device_count()
visible_devices = range(nGPUs)
if backend == "nccl":
# This is a hack for a known NCCL issue using multiprocess
# in conjunction with multiple threads to manage different GPUs which
# may cause ncclCommInitRank to fail.
# http://docs.nvidia.com/deeplearning/sdk/nccl-release-notes/rel_2.1.4.html#rel_2.1.4
# It slows down the performance of collective operations.
# Without this setting NCCL might throw unhandled error.
os.environ["NCCL_MAX_NRINGS"] = "1"
# If the number of ranks is less than or equal to the number of available
# GPUs, each rank can be mapped to its own GPU.
nGPUs_per_process = 1
if world_size > nGPUs:
nGPUs_per_process = nGPUs // world_size
rank_to_GPU = {
i: list(
visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process]
)
for i in range(world_size)
}
return rank_to_GPU
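# Example (illustrative): with 4 visible GPUs and world_size=4, each rank is
# mapped to its own GPU:
#   init_multigpu_helper(4, "gloo") == {0: [0], 1: [1], 2: [2], 3: [3]}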
tmp_dir: Optional[tempfile.TemporaryDirectory] = None
def initialize_temp_directories(init_method: Optional[str] = None) -> None:
global tmp_dir
tmp_dir = tempfile.TemporaryDirectory()
os.environ["TEMP_DIR"] = tmp_dir.name
os.mkdir(os.path.join(tmp_dir.name, "barrier"))
os.mkdir(os.path.join(tmp_dir.name, "test_dir"))
init_dir_path = os.path.join(tmp_dir.name, "init_dir")
os.mkdir(init_dir_path)
# Set init method if specified.
if init_method is not None:
os.environ["INIT_METHOD"] = init_method
else:
os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join(
init_dir_path, "shared_init_file"
)
def cleanup_temp_dir() -> None:
if tmp_dir is not None:
tmp_dir.cleanup()
# [How does MultiProcessTestCase work?]
# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by
# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an
# example which inherits from this class. Its `setUp()` method calls into
# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()`
# subprocesses. During the spawn, the main process passes the test name to
# subprocesses, and the name is acquired from self.id(). The subprocesses
# then use the provided test function name to retrieve the function attribute
# from the test instance and run it. The main process simply waits for all
# subprocesses to join.
class MultiProcessTestCase(TestCase):
MAIN_PROCESS_RANK = -1
# This exit code is used to indicate that the test code had an error and
# exited abnormally. There are certain tests that might use sys.exit() to
# simulate failures and in those cases, we can't have an exit code of 0,
# but we still want to ensure we didn't run into any other errors.
TEST_ERROR_EXIT_CODE = 10
# do not early terminate for distributed tests.
def _should_stop_test_suite(self) -> bool:
return False
@property
def world_size(self) -> int:
return 4
def join_or_run(self, fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MAIN_PROCESS_RANK:
self._join_processes(fn)
else:
fn()
return types.MethodType(wrapper, self)
# The main process spawns N subprocesses that run the test.
# The constructor patches the current instance's test method either to
# assume the role of the main process and join its subprocesses,
# or to run the underlying test function.
def __init__(self, method_name: str = "runTest") -> None:
super().__init__(method_name)
fn = getattr(self, method_name)
setattr(self, method_name, self.join_or_run(fn))
def setUp(self) -> None:
super().setUp()
self.skip_return_code_checks = [] # type: ignore[var-annotated]
self.processes = [] # type: ignore[var-annotated]
self.rank = self.MAIN_PROCESS_RANK
self.file_name = tempfile.NamedTemporaryFile(delete=False).name
# pid to pipe consisting of error message from process.
self.pid_to_pipe = {} # type: ignore[var-annotated]
def tearDown(self) -> None:
super().tearDown()
for p in self.processes:
p.terminate()
# Each Process instance holds a few open file descriptors. The unittest
# runner creates a new TestCase instance for each test method and keeps
# it alive until the end of the entire suite. We must thus reset the
# processes to prevent an effective file descriptor leak.
self.processes = []
def _current_test_name(self) -> str:
# self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
return self.id().split(".")[-1]
def _start_processes(self, proc) -> None:
self.processes = []
for rank in range(int(self.world_size)):
parent_conn, child_conn = torch.multiprocessing.Pipe()
process = proc(
target=self.__class__._run,
name="process " + str(rank),
args=(rank, self._current_test_name(), self.file_name, child_conn),
)
process.start()
logger.info(f"Started process {rank} with pid {process.pid}")
self.pid_to_pipe[process.pid] = parent_conn
self.processes.append(process)
def _spawn_processes(self) -> None:
proc = torch.multiprocessing.get_context("spawn").Process
self._start_processes(proc)
class Event(Enum):
GET_TRACEBACK = 1
@staticmethod
def _event_listener(parent_pipe, signal_pipe, rank: int):
logger.info(f"Starting event listener thread for rank {rank}")
while True:
ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe])
if parent_pipe in ready_pipes:
if parent_pipe.closed:
logger.info(
f"Pipe closed for process {rank}, stopping event listener thread"
)
return
event = parent_pipe.recv()
logger.info(f"Received event {event} on process {rank}")
if event == MultiProcessTestCase.Event.GET_TRACEBACK:
# Return traceback to the parent process.
with tempfile.NamedTemporaryFile(mode="r+") as tmp_file:
faulthandler.dump_traceback(tmp_file)
# Flush buffers and seek to read from the beginning
tmp_file.flush()
tmp_file.seek(0)
parent_pipe.send(tmp_file.read())
logger.info(f"Process {rank} sent traceback")
if signal_pipe in ready_pipes:
return
@classmethod
def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None:
# Enable DDP + ReplicatedTensor
from torch.nn.parallel._replicated_tensor_ddp_utils import _set_ddp_with_replicated_tensor
_set_ddp_with_replicated_tensor(True)
self = cls(test_name)
self.rank = rank
self.file_name = file_name
self.run_test(test_name, parent_pipe)
def run_test(self, test_name: str, parent_pipe) -> None:
# Start event listener thread.
signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False)
event_listener_thread = threading.Thread(
target=MultiProcessTestCase._event_listener,
args=(parent_pipe, signal_recv_pipe, self.rank),
daemon=True,
)
event_listener_thread.start()
if sys.platform != "win32" and sys.platform != "darwin":
# Register signal handler to dump stack traces on FATALs.
# Windows and MacOS do not support the signal handlers.
torch._C._set_print_stack_traces_on_fatal_signal(True)
# Show full C++ stacktraces when a Python error originating from C++ is raised.
os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"
# self.id() == e.g. '__main__.TestDistributed.test_get_rank'
# We're retrieving a corresponding test and executing it.
try:
getattr(self, test_name)()
except unittest.SkipTest as se:
logger.info(
f"Process {self.rank} skipping test {test_name} for following reason: {str(se)}"
)
sys.exit(TEST_SKIPS["generic"].exit_code)
except Exception as e:
logger.error(
f"Caught exception: \n{traceback.format_exc()} exiting "
f"process {self.rank} with exit code: {MultiProcessTestCase.TEST_ERROR_EXIT_CODE}"
)
# Send error to parent process.
parent_pipe.send(traceback.format_exc())
sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
finally:
if signal_send_pipe is not None:
signal_send_pipe.send(None)
assert event_listener_thread is not None
event_listener_thread.join()
# Close pipe after done with test.
parent_pipe.close()
def _get_timedout_process_traceback(self) -> None:
pipes = []
for i, process in enumerate(self.processes):
if process.exitcode is None:
pipe = self.pid_to_pipe[process.pid]
try:
pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
pipes.append((i, pipe))
except ConnectionError as e:
logger.error(
f"Encountered error while trying to get traceback for process {i}: {e}"
)
# Wait for results.
for rank, pipe in pipes:
try:
# Wait for traceback
if pipe.poll(5):
if pipe.closed:
logger.info(
f"Pipe closed for process {rank}, cannot retrieve traceback"
)
continue
traceback = pipe.recv()
logger.error(
f"Process {rank} timed out with traceback: \n\n{traceback}"
)
else:
logger.error(
f"Could not retrieve traceback for timed out process: {rank}"
)
except ConnectionError as e:
logger.error(
f"Encountered error while trying to get traceback for process {rank}: {e}"
)
def _join_processes(self, fn) -> None:
timeout = get_timeout(self.id())
start_time = time.time()
subprocess_error = False
try:
while True:
# check to see if any subprocess exited with an error early.
for (i, p) in enumerate(self.processes):
# This is the exit code processes exit with if they
# encountered an exception.
if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
print(
f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes."
)
active_children = torch.multiprocessing.active_children()
for ac in active_children:
ac.terminate()
subprocess_error = True
break
if subprocess_error:
break
# All processes have joined cleanly if they all have a valid exitcode
if all([p.exitcode is not None for p in self.processes]):
break
# Check if we should time out the test. If so, we terminate each process.
elapsed = time.time() - start_time
if elapsed > timeout:
self._get_timedout_process_traceback()
print(
f"Timing out after {timeout} seconds and killing subprocesses."
)
for p in self.processes:
p.terminate()
break
# Sleep to avoid excessive busy polling.
time.sleep(0.1)
elapsed_time = time.time() - start_time
if fn in self.skip_return_code_checks:
self._check_no_test_errors(elapsed_time)
else:
self._check_return_codes(elapsed_time)
finally:
# Close all pipes
for pid, pipe in self.pid_to_pipe.items():
pipe.close()
def _check_no_test_errors(self, elapsed_time) -> None:
"""
Checks that we didn't have any errors thrown in the child processes.
"""
for i, p in enumerate(self.processes):
if p.exitcode is None:
raise RuntimeError(
"Process {} timed out after {} seconds".format(i, elapsed_time)
)
self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode)
def _check_return_codes(self, elapsed_time) -> None:
"""
Checks that the return codes of all spawned processes match, and skips
tests if they returned a return code indicating a skipping condition.
"""
first_process = self.processes[0]
# first, we check if there are errors in actual processes
# (via TEST_ERROR_EXIT CODE), and raise an exception for those.
# the reason we do this is to attempt to raise a more helpful error
# message than "Process x terminated/timed out"
# TODO: we should pipe the exception of the failed subprocess here.
# Currently, the actual exception is displayed as a logging output.
errored_processes = [
(i, p)
for i, p in enumerate(self.processes)
if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE
]
if errored_processes:
error = ""
for i, process in errored_processes:
# Get error from pipe.
error_message = self.pid_to_pipe[process.pid].recv()
error += (
"Process {} exited with error code {} and exception:\n{}\n".format(
i, MultiProcessTestCase.TEST_ERROR_EXIT_CODE, error_message
)
)
raise RuntimeError(error)
# If no process exited uncleanly, we check for timeouts, and then ensure
# each process exited cleanly.
for i, p in enumerate(self.processes):
if p.exitcode is None:
raise RuntimeError(
"Process {} terminated or timed out after {} seconds".format(
i, elapsed_time
)
)
self.assertEqual(
p.exitcode,
first_process.exitcode,
msg="Expect process {} exit code to match Process 0 exit code of {}, but got {}".format(
i, first_process.exitcode, p.exitcode
),
)
for skip in TEST_SKIPS.values():
if first_process.exitcode == skip.exit_code:
if IS_SANDCASTLE:
# Don't use unittest.skip to skip the test on sandcastle
# since it creates tasks for skipped tests assuming there
# is some follow-up needed. Instead just "pass" the test
# with an appropriate message.
logger.info(
f"Skipping {self.id()} on sandcastle for the following reason: {skip.message}"
)
return
else:
raise unittest.SkipTest(skip.message)
self.assertEqual(
first_process.exitcode,
0,
msg="Expected zero exit code but got {} for pid: {}".format(first_process.exitcode, first_process.pid)
)
@property
def is_master(self) -> bool:
return self.rank == 0
# Cannot use functools.cache as it requires python 3.9
EFA_PROBE_RESULT = None
def has_efa() -> bool:
"""
If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has
Libfabric EFA interfaces and EFA software components installed,
see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html.
"""
global EFA_PROBE_RESULT
if EFA_PROBE_RESULT is not None:
return EFA_PROBE_RESULT
try:
EFA_PROBE_RESULT = subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"]).returncode == 0
except FileNotFoundError:
EFA_PROBE_RESULT = False
return EFA_PROBE_RESULT
def tp_transports():
"""
If the machine has Libfabric EFA interfaces and EFA software components installed it may cause
'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe
uses InfiniBand transport, so we exclude it from tensorpipe transports,
see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022
"""
return ["shm", "uv"] if has_efa() else None
| pytorch-master | torch/testing/_internal/common_distributed.py |
| pytorch-master | torch/testing/_internal/__init__.py |
from functools import wraps, partial
from itertools import product, chain, islice
import itertools
import functools
import collections
import copy
import operator
import random
import unittest
import math
import torch
import numpy as np
from torch._six import inf
import collections.abc
from typing import Any, Dict, List, Sequence, Tuple, Union, Iterable
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
_dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types,
floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and,
all_types, double_types, empty_types, complex_types_and, integral_types
)
from torch.testing._internal.common_device_type import \
(onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIfRocm, skipCUDAIf, precisionOverride,
skipCPUIfNoMklSparse,
toleranceOverride, tol, has_cusolver)
from torch.testing._internal.common_cuda import (
CUDA11OrLater, SM53OrLater, SM60OrLater, with_tf32_off, TEST_CUDNN,
_get_torch_cuda_version, _get_magma_version)
from torch.testing._internal.common_utils import (
make_fullrank_matrices_with_distinct_singular_values,
TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY,
torch_to_numpy_dtype_dict, TEST_WITH_ASAN,
GRADCHECK_NONDET_TOL, slowTest, freeze_rng_state,
)
import torch._refs as refs # noqa: F401
import torch._refs.nn.functional
import torch._refs.special
import torch._refs.linalg
import torch._prims as prims # noqa: F401
from torch.utils._pytree import tree_flatten
from distutils.version import LooseVersion
from torch.testing._internal.opinfo.core import ( # noqa: F401
L,
M,
S,
XS,
_NOTHING,
_getattr_qual,
DecorateInfo,
SampleInput,
ErrorInput,
AliasInfo,
OpInfo,
_find_referenced_opinfo,
_inherit_constructor_args,
PythonRefInfo,
_generate_reduction_inputs,
_generate_reduction_kwargs,
sample_inputs_reduction,
ReductionOpInfo,
reference_inputs_elementwise_binary,
make_error_inputs_elementwise_binary,
generate_elementwise_binary_tensors,
generate_elementwise_binary_arbitrarily_strided_tensors,
generate_elementwise_binary_small_value_tensors,
generate_elementwise_binary_large_value_tensors,
generate_elementwise_binary_extremal_value_tensors,
generate_elementwise_binary_broadcasting_tensors,
generate_elementwise_binary_with_scalar_samples,
generate_elementwise_binary_with_scalar_and_type_promotion_samples,
generate_elementwise_binary_noncontiguous_tensors,
sample_inputs_elementwise_binary,
BinaryUfuncInfo,
sample_inputs_elementwise_unary,
generate_elementwise_unary_tensors,
generate_elementwise_unary_small_value_tensors,
generate_elementwise_unary_large_value_tensors,
generate_elementwise_unary_extremal_value_tensors,
reference_inputs_elementwise_unary,
UnaryUfuncInfo,
sample_inputs_spectral_ops,
SpectralFuncType,
SpectralFuncInfo,
ShapeFuncInfo,
sample_inputs_foreach,
ForeachFuncInfo,
)
has_scipy_fft = False
if TEST_SCIPY:
from scipy import stats
import scipy.spatial
import scipy.special
try:
import scipy.fft
has_scipy_fft = True
except ModuleNotFoundError:
pass
# test if a tensor is close to an integer
def close_to_int(x, eps=0.1):
if x.is_complex():
y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x))))
else:
y = torch.abs(torch.frac(x))
return (y < eps) | (y > (1 - eps))
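# Example (with the default eps=0.1):
#   close_to_int(torch.tensor([1.95, 2.4, 3.05])) -> tensor([True, False, True])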
NumericsFilter = collections.namedtuple('NumericsFilter', ['condition', 'safe_val'])
def _generate_masked_op_mask(input_shape, device, **kwargs):
yield None
yield make_tensor(input_shape, dtype=torch.bool, device=device, requires_grad=False)
if len(input_shape) > 2:
# broadcast last mask dimension:
yield make_tensor(input_shape[:-1] + (1,), dtype=torch.bool, device=device, requires_grad=False)
# broadcast middle mask dimension:
yield make_tensor(input_shape[:1] + (1,) + input_shape[2:], dtype=torch.bool, device=device, requires_grad=False)
# broadcast first mask dimension:
yield make_tensor((1,) + input_shape[1:], dtype=torch.bool, device=device, requires_grad=False)
# mask.ndim < input.ndim
yield make_tensor(input_shape[1:], dtype=torch.bool, device=device, requires_grad=False)
# mask.ndim == 1
yield make_tensor(input_shape[-1:], dtype=torch.bool, device=device, requires_grad=False)
# Masks that require broadcasting of inputs (mask.ndim >
# input.ndim) are not supported; however, we may
# reconsider this if there is demand for this kind of
# degenerate case.
def sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked reduction operators.
A masked reduction operator is a reduction operator with a trailing
optional mask argument. A mask is a bool tensor with the same
shape as the input, or a shape that is broadcastable to the input shape.
"""
kwargs['supports_multiple_dims'] = op_info.supports_multiple_dims
for sample_input in sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):
sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)
yield SampleInput(sample_input.input.detach().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs)
if (not requires_grad and dtype.is_floating_point and
sample_input.input.ndim == 2 and mask is not None and
mask.shape == sample_input.input.shape):
for v in [torch.inf, -torch.inf, torch.nan]:
t = sample_input.input.detach()
t.diagonal(0, -2, -1).fill_(v)
yield SampleInput(t.requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs)
def sample_inputs_sparse_coo_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked reduction operators that support inputs
with sparse coo layouts.
"""
if op_info.supports_sparse:
op_name = op_info.name.replace('_masked.', '')
for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
mask = sample_input.kwargs.get('mask')
if mask is not None:
sample_input_kwargs = sample_input.kwargs.copy()
sample_input_kwargs.update(mask=mask.to_sparse())
yield SampleInput(sample_input.input.to_sparse(),
args=sample_input.args, kwargs=sample_input_kwargs)
else:
if op_name in {'prod', 'amax', 'amin'}:
# FIXME: for now reductions with non-zero reduction identity and
# unspecified mask are not supported for sparse COO
# tensors, see torch._masked.prod implementation
# for details.
continue
yield SampleInput(sample_input.input.to_sparse(),
args=sample_input.args, kwargs=sample_input.kwargs)
def sample_inputs_sparse_csr_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked reduction operators that support inputs
with sparse csr layouts.
"""
if op_info.supports_sparse_csr:
op_name = op_info.name.replace('_masked.', '')
for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
if not (sample_input.input.ndim == 2 and sample_input.kwargs.get('keepdim')):
# - sparse CSR tensors are always 2-D tensors
# - masked reduction on CSR tensors are defined only if keepdim is True.
continue
mask = sample_input.kwargs.get('mask')
if mask is not None:
sample_input_kwargs = sample_input.kwargs.copy()
sample_input_kwargs.update(mask=mask.to_sparse_csr())
new_sample = SampleInput(sample_input.input.to_sparse_csr(),
args=sample_input.args, kwargs=sample_input_kwargs)
else:
if op_name in ['prod', 'amax', 'amin', 'mean']:
# Reductions with a non-zero reduction identity and an
# unspecified mask are not supported for sparse CSR
# tensors; see the torch._masked.prod implementation
# for details.
continue
new_sample = SampleInput(sample_input.input.to_sparse_csr(),
args=sample_input.args, kwargs=sample_input.kwargs)
yield new_sample
if sample_input.kwargs['dim'] == 0:
# Reductions of CSR tensors use different implementations for
# inner and/or outer dimensions. So, as a minimum of testing CSR
# implementations the following kwargs must be generated:
# dict(dim=0, keepdim=True)
# dict(dim=1, keepdim=True)
# dict(dim=(0, 1), keepdim=True)
# Here we generate the dim=1 case from the dim=0 case.
sample_input_kwargs = new_sample.kwargs.copy()
sample_input_kwargs.update(dim=1)
yield SampleInput(new_sample.input.clone(),
args=sample_input.args, kwargs=sample_input_kwargs)
def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked norm.
"""
for ord in [2.0, 1, float('inf'), float('-inf'), 0]:
for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()
yield SampleInput(sample_input.input.clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs)
def sample_inputs_masked_std_var(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked std/var.
"""
for unbiased in [False, True]:
for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
if sample_input.args:
dim = sample_input.args[0]
sample_input_args = sample_input.args[:1] + (unbiased,) + sample_input.args[1:]
sample_input_kwargs = sample_input.kwargs.copy()
else:
dim = sample_input.kwargs.get('dim')
sample_input_args = sample_input.args
sample_input_kwargs = dict(sample_input.kwargs, unbiased=unbiased)
if requires_grad:
if sample_input_kwargs.get('mask') is None:
orig_count = torch._masked.sum(torch.ones(sample_input.input.shape, dtype=torch.int64), dim, keepdim=True)
else:
inmask = torch._masked._input_mask(sample_input.input, *sample_input_args, **sample_input_kwargs)
orig_count = torch._masked.sum(inmask.new_ones(sample_input.input.shape, dtype=torch.int64),
dim, keepdim=True, mask=inmask)
if orig_count.min() <= int(unbiased) + 1:
# Skip samples that lead to singularities in the var
# computation, resulting in nan values both in var and in
# the autograd output, which test_grad_fn cannot handle
# correctly. Also skip samples where the autograd output
# for std cannot be handled correctly due to torch.sqrt.
continue
yield SampleInput(sample_input.input.detach().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs)
def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
args_cases = (
# Cases with tensor indices.
(torch.tensor([1, 2, 3]),),
(torch.tensor(1),),
(torch.tensor([1, 2, 3]), 1),
(torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1),
# Cases with list of indices.
((2, 4),),
((2, 4), 1),
((2, 4), -1),
# Cases with integer section.
(3,),
(3, 1),
(3, -1),
)
for args in args_cases:
yield SampleInput(make_input((S, S, S)), args=args)
def sample_inputs_linalg_det_logdet_slogdet(op_info, device, dtype, requires_grad, **kwargs):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad)
batches = [(), (0, ), (3, )]
ns = [0, 1, 5]
is_logdet = (op_info.name == "logdet")
for batch, n, in product(batches, ns):
shape = batch + (n, n)
A = make_arg(*shape)
        # logdet requires matrices with a positive determinant for autograd.
        # To ensure that, we multiply A by the sign of its determinant (from
        # torch.linalg.slogdet), flipping any negative determinant to positive.
if is_logdet and not A.is_complex() and A.numel() > 0:
s = torch.linalg.slogdet(A).sign
A = A * s.unsqueeze(-1).unsqueeze(-1)
A.requires_grad_(requires_grad)
yield SampleInput(A)
def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype)
def make_singular_matrix_batch_base(size, rank):
assert size[-1] == size[-2]
assert rank > 0 and rank < size[-1]
n = size[-1]
a = make_arg(size[:-2] + (n, rank)) / 10
b = make_arg(size[:-2] + (rank, n)) / 10
x = a @ b
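        # x = a @ b has rank at most `rank`, being the product of (n, rank) and
        # (rank, n) factors; the LU-based rescaling below then pins the (n - rank)
        # smallest diagonal entries of U to eps, so the reconstructed matrix
        # p @ l @ u is numerically singular with the requested rank.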
lu, pivs, _ = torch.linalg.lu_factor_ex(x)
p, l, u = torch.lu_unpack(lu, pivs)
u_diag_abs = u.diagonal(0, -2, -1).abs()
u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values
u_diag_abs_smallest_idxs = torch.topk(u_diag_abs, k=(n - rank), largest=False).indices
u.diagonal(0, -2, -1).div_(u_diag_abs_largest)
u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps
matrix = p @ l @ u
matrix.requires_grad_(requires_grad)
return matrix
def sample_generator():
for batch, size in product(((), (2,), (2, 2)), range(6)):
shape = batch + (size, size)
for rank in range(1, size):
yield make_singular_matrix_batch_base(shape, rank)
return [SampleInput(t) for t in sample_generator()]
def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_arg_fullrank = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad)
# (<matrix_size>, (<batch_sizes, ...>))
test_sizes = [
(1, ()),
(2, (0,)),
(2, (2,)),
]
for matrix_size, batch_sizes in test_sizes:
size = batch_sizes + (matrix_size, matrix_size)
for n in (0, 3, 5):
yield SampleInput(make_arg(size), args=(n,))
for n in [-4, -2, -1]:
yield SampleInput(make_arg_fullrank(*size), args=(n,))
def sample_inputs_hsplit(op_info, device, dtype, requires_grad, **kwargs):
return (SampleInput(make_tensor((6,), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),
SampleInput(make_tensor((S, S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),)
def sample_inputs_vsplit(op_info, device, dtype, requires_grad, **kwargs):
return (SampleInput(make_tensor((6, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),
SampleInput(make_tensor((S, S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),)
def sample_inputs_dsplit(op_info, device, dtype, requires_grad, **kwargs):
return (SampleInput(make_tensor((S, S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),
SampleInput(make_tensor((S, S, 6), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),)
def error_inputs_hsplit(op_info, device, **kwargs):
err_msg1 = ("torch.hsplit requires a tensor with at least 1 dimension, "
"but got a tensor with 0 dimensions!")
si1 = SampleInput(make_tensor((),
dtype=torch.float32,
device=device),
args=(0,),)
err_msg2 = (f"torch.hsplit attempted to split along dimension 1, "
f"but the size of the dimension {S} "
f"is not divisible by the split_size 0!")
si2 = SampleInput(make_tensor((S, S, S),
dtype=torch.float32,
device=device),
args=(0,),)
# Incorrect type for indices_or_section argument
err_msg3 = ("received an invalid combination of arguments.")
si3 = SampleInput(make_tensor((S, S, S),
dtype=torch.float32,
device=device),
args=("abc",),)
yield ErrorInput(si1, error_regex=err_msg1)
yield ErrorInput(si2, error_regex=err_msg2)
yield ErrorInput(si3, error_type=TypeError, error_regex=err_msg3)
def error_inputs_vsplit(op_info, device, **kwargs):
err_msg1 = ("torch.vsplit requires a tensor with at least 2 dimension, "
"but got a tensor with 1 dimensions!")
si1 = SampleInput(make_tensor((S,),
dtype=torch.float32,
device=device),
args=(0,),)
err_msg2 = (f"torch.vsplit attempted to split along dimension 0, "
f"but the size of the dimension {S} "
f"is not divisible by the split_size 0!")
si2 = SampleInput(make_tensor((S, S, S),
dtype=torch.float32,
device=device),
args=(0,),)
# Incorrect type for indices_or_section argument
err_msg3 = ("received an invalid combination of arguments.")
si3 = SampleInput(make_tensor((S, S, S),
dtype=torch.float32,
device=device),
args=("abc",),)
yield ErrorInput(si1, error_regex=err_msg1)
yield ErrorInput(si2, error_regex=err_msg2)
yield ErrorInput(si3, error_type=TypeError, error_regex=err_msg3)
def error_inputs_dsplit(op_info, device, **kwargs):
err_msg1 = ("torch.dsplit requires a tensor with at least 3 dimension, "
"but got a tensor with 1 dimensions!")
si1 = SampleInput(make_tensor((S,),
dtype=torch.float32,
device=device),
args=(0,),)
err_msg2 = (f"torch.dsplit attempted to split along dimension 2, "
f"but the size of the dimension {S} "
f"is not divisible by the split_size 0!")
si2 = SampleInput(make_tensor((S, S, S),
dtype=torch.float32,
device=device),
args=(0,),)
return (ErrorInput(si1, error_regex=err_msg1),
ErrorInput(si2, error_regex=err_msg2),)
def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad, **kwargs):
# Each test case consists of the sizes in the chain of multiplications
# e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5)
test_cases = [
[1, 2, 1],
[2, 0, 2],
[0, 2, 2],
[2, 2, 2, 2],
[2, 3, 4, 5],
[5, 4, 0, 2],
[2, 4, 3, 5, 3, 2]
]
result = []
for sizes in test_cases:
tensors = []
for size in zip(sizes[:-1], sizes[1:]):
t = make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad)
tensors.append(t)
result.append(SampleInput(tensors))
return result
def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):
low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32)
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
sizes = ((2, 2), (2, 3, 2))
if dtype in low_precision_dtypes:
# svdvals not supported for low precision dtypes
ords = ('fro', inf, -inf, 1, -1)
else:
ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2)
dims = ((-2, -1), (-1, 0))
for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]):
yield SampleInput(make_arg(size), args=(ord, dim, keepdim))
def sample_inputs_linalg_norm(op_info, device, dtype, requires_grad, *, variant=None, **kwargs):
if variant is not None and variant not in ('subgradient_at_zero',):
raise ValueError(f"Unsupported variant, expected variant to be 'subgradient_at_zero' but got: {variant}")
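    # The 'subgradient_at_zero' variant feeds all-zero tensors so that tests can
    # exercise the norm's (sub)gradient at the origin.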
test_sizes = [
(S,),
(0,),
(S, S),
(0, 0),
(S, 0),
(0, S),
(S, S, S),
(0, S, S),
(S, 0, S),
(0, 0, 0),
]
vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)
matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)
inputs = []
for test_size in test_sizes:
is_vector_norm = len(test_size) == 1
is_matrix_norm = len(test_size) == 2
for keepdim in [False, True]:
            if variant != 'subgradient_at_zero':
inputs.append(SampleInput(
make_tensor(
test_size, dtype=dtype, device=device, low=None, high=None,
requires_grad=requires_grad),
kwargs=dict(
keepdim=keepdim)))
if not (is_vector_norm or is_matrix_norm):
continue
ords = vector_ords if is_vector_norm else matrix_ords
for ord in ords:
if variant == 'subgradient_at_zero':
inputs.append(SampleInput(
torch.zeros(
test_size, dtype=dtype, device=device,
requires_grad=requires_grad),
args=(ord,),
kwargs=dict(keepdim=keepdim)))
else:
inputs.append(SampleInput(
make_tensor(
test_size, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=(ord,),
kwargs=dict(
keepdim=keepdim)))
if ord in ['nuc', 'fro']:
inputs.append(SampleInput(
make_tensor(
test_size, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
kwargs=dict(
ord=ord,
keepdim=keepdim,
dim=(0, 1))))
return inputs
def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# input shape, output shape, output stride, output storage offset
test_cases = (
((1,), (1,), (1,), 0),
((3, 3), (2, 2), (1, 2), 0),
((3, 3), (2, 2), (1, 2), 1),
((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0),
((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0),
)
for input_shape, output_shape, stride, storage_offset in test_cases:
input_t = make_arg(input_shape)
kwargs = dict(storage_offset=storage_offset)
yield SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs)
# as_strided on offset, partial views
# yield SampleInput(make_arg((20,))[5:15], args=((2, 2), (1, 2)))
# yield SampleInput(make_arg((20,))[5:15], args=((2, 2), (1, 2)), kwargs={'storage_offset': 0})
def sample_inputs_as_strided_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# input shape, output shape, output stride, output storage offset
test_cases = [
((1,), (1,), (1,), 0),
((3, 3), (2, 2), (1, 2), 0),
((3, 3), (2, 2), (1, 2), 1),
((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0),
((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0),
]
samples = []
for input_shape, output_shape, stride, storage_offset in test_cases:
input_t = make_arg(input_shape)
input_src = make_arg(output_shape)
kwargs = dict(storage_offset=storage_offset)
samples.append(SampleInput(input_t, args=(input_src, output_shape, stride), kwargs=kwargs))
return samples
def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs):
inputs = (
(0,),
(0, 1),
(0, 1, 2, 3),
)
rvals = [1, 2, 4]
products = product(inputs, rvals, [False, True])
samples = []
for input_data, r, with_replacement in products:
input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad)
kwargs = dict(r=r, with_replacement=with_replacement)
samples.append(SampleInput(input_t, kwargs=kwargs))
return tuple(samples)
def sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# constructs 1-D tensors with varying number of elements
a = make_arg((0,))
b = make_arg((0, 1))
c = make_arg((0, 1, 2, 3))
samples = []
# sample with only 1 tensor
samples.append(SampleInput(
a
))
# sample with 2 tensors
samples.append(SampleInput(
a,
args=(b,)
))
# sample with 3 tensors
samples.append(SampleInput(
a,
args=(b, c)
))
return tuple(samples)
def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input_shape, dict of dim and eps
cases: Tuple[tuple, dict] = ( # type: ignore[assignment]
((S, S), {'dim': 1}),
((S, 2), {'dim': -1}),
((S,), {'dim': 0, 'eps': 0.5}),
((), {'dim': 0}),
((S, S, M), {'dim': 2}),
((S, S), {})
)
for input_shape, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs)
# Test for Broadcasting
yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})
yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2})
yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})
def sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
# Ordered as: input shape, kwargs for training, momentum, eps
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}),
((3, 2, 4), {'training': False, 'momentum': -1.2}),
((3, 1), {'training': True, 'momentum': 0.0}),
((0,), {'training': True}),
((0,), {'training': False}),
((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}),
((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}),
((2, 1), {}),
)
for input_shape, kwargs in cases:
# args: running mean, running var, weight and bias should necessarily be of shape: (channels,)
channels = input_shape[1] if len(input_shape) > 1 else 0
weight = make_arg(channels) if channels > 0 else None
bias = make_arg(channels) if channels > 0 else None
running_mean = make_arg_without_requires_grad(channels, low=0)
running_var = make_arg_without_requires_grad(channels, low=0)
yield SampleInput(
make_arg(input_shape),
args=(
running_mean,
running_var,
weight,
bias
),
kwargs=kwargs
)
# Checking for permutations of weights and biases as `None`
weights = [channels, None, None]
biases = [None, channels, None]
is_training = [True, False, False]
    for weight, bias, training in zip(weights, biases, is_training):
        yield SampleInput(
            make_arg(input_shape),
            args=(
                running_mean,
                running_var,
                make_arg(weight) if weight is not None else None,
                make_arg(bias) if bias is not None else None
            ),
            kwargs={'training': training}
        )
# Test case for no optional kwargs
# running_mean and running_var are required in evaluation mode (training: False) but not in training mode
yield SampleInput(make_arg((1, 2, 3)), args=(None, None), kwargs={'training': True})
def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
(()),
((S, )),
((S, S)),
((S, M, S))
)
for shape in cases:
yield SampleInput(make_arg(shape))
def sample_inputs_prelu(op_info, device, dtype, requires_grad, **kwargs):
op_kwargs = op_info.sample_kwargs(device, dtype, None)[0]
yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad,
op_kwargs=op_kwargs)
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
(()),
((S, )),
((S, S)),
((S, M, S))
)
for shape in cases:
for weight in [-1., 0., 0.8, 1.]:
weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(make_arg(shape), args=(weight_tensor,))
if len(shape) >= 2:
channel_size = shape[1]
yield SampleInput(make_arg(shape), args=(make_arg((channel_size,)),))
weight_tensor = torch.tensor(1., device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(make_arg((S, S)), kwargs=dict(weight=weight_tensor,))
yield SampleInput(make_arg((S, S)), kwargs=dict(weight=make_arg((S,)),))
def reference_inputs_prelu(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_prelu(op, device, dtype, requires_grad, **kwargs)
yield from reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs)
def sample_kwargs_prelu_scalar_weight(device, dtype, input):
weight = torch.rand(tuple(), device=device, dtype=dtype)
# NumPy does not support bfloat16, so we default to float32 (only for NumPy) in that case
if dtype == torch.bfloat16:
weight_cpu = weight.to(dtype=torch.float32, device="cpu")
else:
weight_cpu = weight.cpu()
np_weight = weight_cpu.numpy()
return ({'weight': weight}, {'weight': np_weight})
def error_inputs_prelu(op, device):
    # Weight has numel != 1, but the input is a zero-dim tensor
inp = make_tensor(tuple(), device=device, dtype=torch.float32)
weight = make_tensor((2,), device=device, dtype=torch.float32)
yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}),
error_regex="Not allow zero-dim input tensor.")
# Weight has numel != 1, but numel does not match channel size
inp = make_tensor((2, 8, 3), device=device, dtype=torch.float32)
weight = make_tensor((9,), device=device, dtype=torch.float32)
yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}),
error_regex="Mismatch of parameter numbers and input channel size.")
# Weight is neither a scalar nor 1-D tensor
inp = make_tensor((2, 8, 3), device=device, dtype=torch.float32)
weight = make_tensor((2, 4), device=device, dtype=torch.float32)
yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}),
error_regex="prelu: Expected `weight` to be a scalar or 1D tensor, but got ndim = 2")
def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # ord = inf is tested in sample_inputs_norm_inf as it fails on some tests
cases = [
((S, S), (2,), '2'),
((S, S), (0,), '0'),
((S, S), (0.5,), '0_5'),
((S, S), (1,), '1'),
((S, S), (3,), '3'),
((S, S), (-1,), 'neg_1'),
((S, S), (-2,), 'neg_2'),
((S, S), (-0.5,), 'neg_0_5'),
((S, S), (-1.5,), 'neg_1_5'),
]
cases_nonzero_input = (
((S, S, S), (1.5,), '1_5_default'),
((S, S, S), (1.5, 1), '1_5_dim'),
((S, S, S), (1.5, -1), '1_5_neg_dim'),
((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'),
((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'),
)
cases_posdim = (
((S, S), (-2, 1,), 'neg_2_dim'),
((S, S), (-1, 1,), 'neg_1_dim'),
((S, S), (0, 1,), '0_dim'),
((S, S), (1, 1,), '1_dim'),
((S, S), (2, 1,), '2_dim'),
((S, S), (3, 1,), '3_dim'),
((S, S, S), (2, 1), '2_dim'),
((S, S, S), (3, 1), '3_dim'),
((S, S, S), (2, 1, True), 'keepdim_2_dim'),
((S, S, S), (3, 1, True), 'keepdim_3_dim'),
((), (2, 0), '2_dim_scalar'),
((), (3, 0), '3_dim_scalar'),
((), (2, 0, True), 'keepdim_2_dim_scalar'),
((), (3, 0, True), 'keepdim_3_dim_scalar'),
)
cases_negdim = ((shape, args[:1] + (-args[1],) + args[2:], name.replace("_dim", "_neg_dim"))
for shape, args, name in cases_posdim)
for shape, args, name in itertools.chain(cases, cases_posdim, cases_negdim):
yield SampleInput(make_arg(shape), args=args, name=name)
for shape, args, name in cases_nonzero_input:
yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name)
def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (), 'default'),
((S, S), ('fro',), 'fro_default'),
((S, S), ('fro', [0, 1],), 'fro'),
)
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), ('nuc',), 'nuc'),
((S, S, S), ('nuc', [1, 2]), 'nuc_batched'),
)
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (-inf,), '-inf'),
((S, S), (inf,), 'inf'),
((S, S), (inf, 1,), 'inf_2_dim'),
((S, S), (inf, -1,), 'inf_2_neg_dim'),
)
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
def sample_kwargs_vector_norm(t, **kwargs):
# orders with / without identity
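    # ("identity" here means that the order has a well-defined reduction identity, so
    #  it can be evaluated over an empty slice; orders without one, e.g. inf and -inf,
    #  are only sampled when no reduced dimension is empty)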
def ords():
has_id = (6, 4, 2, 1, 0, 0.9)
no_id = (inf, -2.1, -inf)
if t.numel() == 0:
dim = kwargs.get("dim")
if dim is None:
return has_id
if not isinstance(dim, Iterable):
dim = (dim,)
for d in dim:
if t.size(d) == 0:
return has_id
return has_id + no_id
return (((), dict(ord=o)) for o in ords())
def sample_inputs_equal(op, device, dtype, requires_grad, **kwargs):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shapes = (
((), ()),
((S,), ()),
((), (S,)),
((S, 1), (S,)),
((M, S), ()),
((S, S), (S, S))
)
for shape_lhs, shape_rhs in shapes:
lhs = make_arg(shape_lhs)
rhs = make_arg(shape_rhs)
broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)
yield SampleInput(lhs, args=(rhs,), broadcasts_input=broadcasts_input)
if shape_lhs == shape_rhs:
yield SampleInput(lhs, args=(lhs.clone().detach_(),))
def sample_inputs_jiterator(op, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shapes = (
((), ()),
((S,), ()),
((S, 1), (S,)),
((M, S), ()),
((S, M, S), (M, S)),
((S, M, S), (S, M, S)),
((M, 1, S), (M, S)),
((M, 1, S), (1, M, S)),
((0, 1, 3), (0, 10, 3))
)
num_inputs = kwargs.get('num_inputs')
sample_kwargs = kwargs.get('sample_kwargs', {})
for shape_lhs, shape_rhs in shapes:
lhs = make_arg(shape_lhs)
args = []
for i in range(num_inputs - 1):
args.append(make_arg(shape_rhs))
broadcasts_input = (shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs))
yield SampleInput(lhs, args=tuple(args), kwargs=sample_kwargs, broadcasts_input=broadcasts_input)
def sample_inputs_broadcast_shapes(op, device, dtype, requires_grad, **kwargs):
shapes = (
((), ()),
((S,), ()),
((S, 1), (S,)),
((S, 1), S),
((M, S), ()),
((S, M, S), (M, S)),
((S, M, S), (S, M, S)),
((M, 1, S), (M, S)),
((M, 1, S), (1, M, S)),
((0, 1, 3), (0, 10, 3))
)
for shape in shapes:
inp, *arg0 = shape
yield SampleInput(inp, args=tuple(arg0))
def sample_inputs_add_sub(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs)
# Adds alpha kwarg cases
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs)
rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs)
if dtype is not torch.bool:
yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': 2})
else:
yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': True})
neg_alpha = -3.14 if (dtype.is_floating_point or dtype.is_complex) else -3
lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs)
rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs)
if dtype is not torch.bool:
yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': neg_alpha})
else:
yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': False})
def error_inputs_arange(op, device, **kwargs):
yield ErrorInput(SampleInput(0, args=(3, 0)), error_type=RuntimeError, error_regex='step must be nonzer')
yield ErrorInput(SampleInput(0, args=(-3, 2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign')
yield ErrorInput(SampleInput(0, args=(3, -2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign')
yield ErrorInput(SampleInput(0, args=(float('inf'), 2)), error_type=RuntimeError, error_regex='unsupported range')
yield ErrorInput(SampleInput(float('-inf'), args=(1, 2)), error_type=RuntimeError, error_regex='unsupported range')
def sample_inputs_arange(op, device, dtype, requires_grad, **kwargs):
int_samples = (
# positive direction
(-1, 2, 2),
# negative direction
(2, -3, -1),
# start == end
(1, 1, 1),
(1, 1, -1),
# divides evenly
(0, -8, -4),
(1, 5, 2),
# bool
(False, True, True),
# default step
(0, 1, None),
# default start
(None, 3, None),
)
def to_float(start, end, step):
start = start + 0.1 if start is not None else None
end = end + 0.1
step = float(step) if step is not None else None
return start, end, step
float_samples = (
# includes endpoint
(0., -8. - 1e-6, -4.),
(1., 5. + 1e-6, 2.),
(0., -8., -4.),
(1., 5., 2.),
*(to_float(start, end, step) for (start, end, step) in int_samples),
)
large_samples = (
(0, 10000, None),
)
samples = int_samples + float_samples
if dtype not in (torch.int8, torch.uint8):
samples += large_samples
for start, end, step in samples:
if start is None:
assert step is None
yield SampleInput(end, kwargs={"dtype": dtype, "device": device})
elif step is None:
yield SampleInput(start, args=(end,), kwargs={"dtype": dtype, "device": device})
else:
yield SampleInput(start, args=(end, step), kwargs={"dtype": dtype, "device": device})
yield SampleInput(2)
yield SampleInput(1, args=(3, 1))
def error_inputs_linspace(op, device, **kwargs):
yield ErrorInput(SampleInput(0, args=(3, -1)), error_type=RuntimeError, error_regex='number of steps must be non-negative')
yield ErrorInput(SampleInput(0, args=(3, 1.)), error_type=TypeError, error_regex='must be int, not float')
def sample_inputs_linspace(op, device, dtype, requires_grad, **kwargs):
ends = (-3, 0, 1, 4, 50)
starts = (-2., 0, 4.3, 50)
nsteps = (0, 1, 50)
# Extra case to replicate off-by-one issue on CUDA
cases = list(product(starts, ends, nsteps)) + [(0, 7, 50)]
for start, end, nstep in cases:
        if dtype == torch.uint8 and (end < 0 or start < 0):
continue
yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device})
yield SampleInput(1, args=(3, 1))
def sample_inputs_logpace(op, device, dtype, requires_grad, **kwargs):
ends = (-3, 0, 1.2, 2, 4)
starts = (-2., 0, 1, 2, 4.3)
nsteps = (0, 1, 2, 4)
bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.)
for start, end, nstep, base in product(starts, ends, nsteps, bases):
        if dtype == torch.uint8 and (end < 0 or start < 0):
continue
if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point):
# https://github.com/pytorch/pytorch/issues/82242
continue
if base is None:
yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device})
else:
yield SampleInput(start, args=(end, nstep, base), kwargs={"dtype": dtype, "device": device})
yield SampleInput(1, args=(3, 1, 2.))
def sample_inputs_isclose(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs)
# Creates additional inputs to test the rtol, atol, and equal_nan params
rtols = [0., 1e-7]
atols = [0., 1e-7]
equal_nans = [False, True]
products = product(rtols, atols, equal_nans)
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
for rtol, atol, equal_nan in products:
lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs)
rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs)
yield SampleInput(lhs, args=(rhs,),
kwargs=dict(rtol=rtol, atol=atol, equal_nan=equal_nan))
def error_inputs_isclose(op, device, **kwargs):
make_float_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False)
yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'rtol': -0.4}),
error_type=RuntimeError,
error_regex='rtol must be greater than or equal to zero')
yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'atol': -0.4}),
error_type=RuntimeError,
error_regex='atol must be greater than or equal to zero')
def sample_inputs_linalg_vecdot(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
batches = ((), (0,), (1,), (5,))
ns = (0, 1, 3, 5)
for b, n in product(batches, ns):
shape = b + (n,)
yield SampleInput(make_arg(shape), args=(make_arg(shape),))
for i in range(len(shape)):
yield SampleInput(make_arg(shape), args=(make_arg(shape),), kwargs=dict(dim=i))
def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return (SampleInput(make_arg((1, 2))),
SampleInput(make_arg((2,))),
SampleInput(make_arg(())))
def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def make_arg_conj(size):
return make_arg(size).conj().requires_grad_(requires_grad)
first_shape, second_shape = (S, M), (M, S)
yield SampleInput(make_arg(first_shape), args=(make_arg(second_shape),))
if dtype.is_complex:
yield SampleInput(make_arg(first_shape), args=(make_arg_conj(second_shape),))
def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs):
alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)
beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2)
tests_list = [
((2, 3), (2, 2), (2, 3), False)
]
tests_with_lhs_broadcasting = [
((1,), (2, 2), (2, 3), True),
((), (2, 2), (2, 3), True)
]
test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]
sample_inputs = []
for shape_a, shape_b, shape_c, broadcasts_input in test_cases:
sample_inputs.append(
SampleInput(
make_tensor(shape_a, dtype=dtype, device=device, requires_grad=requires_grad),
args=(
make_tensor(shape_b, dtype=dtype, device=device,
requires_grad=requires_grad),
make_tensor(shape_c, dtype=dtype, device=device,
requires_grad=requires_grad)),
kwargs={'alpha': alpha_val, 'beta': beta_val},
broadcasts_input=broadcasts_input))
if dtype.is_complex:
shape = (3, 3)
sample_inputs.append(
SampleInput(make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad),
args=(
make_tensor(shape, dtype=dtype, device=device).mH.requires_grad_(requires_grad),
make_tensor(shape, dtype=dtype, device=device,
requires_grad=requires_grad)),
kwargs={'alpha': alpha_val, 'beta': beta_val},))
sample_inputs.append(
SampleInput(make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad),
args=(
make_tensor(shape, dtype=dtype, device=device,
requires_grad=requires_grad),
make_tensor(shape, dtype=dtype, device=device).mH.requires_grad_(requires_grad)),
kwargs={'alpha': alpha_val, 'beta': beta_val},))
return sample_inputs
def sample_inputs_sparse_sampled_addmm(op_info, device, dtype, requires_grad, **kwargs):
alpha = 2 + 3j if dtype.is_complex else 0.6
beta = 1 + 2j if dtype.is_complex else 0.2
def generator():
# sparse.sampled_addmm performs: alpha * (A @ B) * sparse_ones_like(C) + beta * C
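        # (a dense-reference sketch of this formula is given right after this function)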
for m, n, k in itertools.product([0, 5], repeat=3):
yield SampleInput(
torch.eye(m, n, device=device, dtype=dtype)
.to_sparse_csr()
.requires_grad_(requires_grad),
args=(
make_tensor(
(m, k),
device=device,
dtype=dtype,
requires_grad=requires_grad,
),
make_tensor(
(k, n),
device=device,
dtype=dtype,
requires_grad=requires_grad,
),
),
kwargs={"alpha": alpha, "beta": beta},
)
return list(generator())
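# Illustrative-only sketch, not referenced by any OpInfo: a dense reference for the
# semantics described in the comment inside sample_inputs_sparse_sampled_addmm,
# assuming C_csr is a 2-D sparse CSR tensor and A, B are dense matrices of
# compatible shapes.
def _example_sampled_addmm_dense_reference(C_csr, A, B, *, alpha=1.0, beta=1.0):
    # Dense 0/1 mask with ones at the positions stored in C_csr
    # (a dense view of sparse_ones_like(C)).
    pattern = torch.sparse_csr_tensor(
        C_csr.crow_indices(), C_csr.col_indices(),
        torch.ones_like(C_csr.values()), size=C_csr.shape).to_dense()
    # alpha * (A @ B) restricted to the sparsity pattern of C, plus beta * C.
    return alpha * (A @ B) * pattern + beta * C_csr.to_dense()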
def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((M, S, M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((M, M, S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def make_arg_conj(size):
return make_arg(size).conj().requires_grad_(requires_grad)
sample_inputs = []
sample_inputs.append(SampleInput(make_arg((S, )), args=(make_arg((S, )),)))
if dtype.is_complex:
# dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor)
# is tested in test_conj_view (which tests operations with only conjugated input tensor
# -- not conjugated arg tensors)
sample_inputs.append(SampleInput(make_arg((S, )), args=(make_arg_conj((S, )),)))
return sample_inputs
def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases = (((S,), (S, M), (M,), 1, 1, False),
((S,), (S, M), (M,), 0.2, 0.6, False),
)
test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True),
((1,), (S, M), (M,), 0.2, 0.6, True),
((), (S, M), (M,), 1, 1, True),
((), (S, M), (M,), 0.2, 0.6, True),
)
cases = test_cases + test_cases_with_broadcast
    # addmv performs: beta * input + alpha * (mat @ vec)
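    # (an illustrative dense reference for this formula is given right after this function)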
for size, mat, vec, beta, alpha, broadcasts_input in cases:
yield SampleInput(make_arg(size), args=(make_arg(mat), make_arg(vec)),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input)
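# Illustrative-only sketch, not referenced by any OpInfo: the dense computation the
# comment in sample_inputs_addmv refers to, assuming `inp` broadcasts against the
# result of `mat @ vec`.
def _example_addmv_reference(inp, mat, vec, *, beta=1.0, alpha=1.0):
    return beta * inp + alpha * (mat @ vec)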
def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting
test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False),
((1,), (S, S, S), (S, S, M), 1, 1, True),
((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
((), (S, S, S), (S, S, M), 1, 1, True),
((), (S, S, S), (S, S, M), 0.6, 0.2, True),
]
for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases:
if dtype.is_complex:
beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j)
yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting)
yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting)
def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):
test_cases = [(((S, S), (S, S), (S, S)), False),
(((S, S), (S, 1), (1, S)), False),
(((1,), (S, S, 1), (1, S)), True),
(((), (), ()), False),
(((S, S), (), ()), True),
(((), (S, S, 1), (1, S)), True)
]
sample_inputs = []
for input_args, broadcasts_input in test_cases:
# addcdiv should accept inputs with zero value
# Currently, it throws ZeroDivisionError when the denominator is zero
# TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed
args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad,
exclude_zero=True) if isinstance(arg, tuple) else arg
for arg in input_args)
sample_inputs.append(SampleInput(
args[0],
args=args[1:],
broadcasts_input=broadcasts_input))
# addcdiv should accept inputs with zero value
# Currently, it throws ZeroDivisionError when the denominator is zero
# TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed
args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad,
exclude_zero=True) if isinstance(arg, tuple) else arg
for arg in input_args)
sample_inputs.append(SampleInput(
args[0],
args=args[1:],
kwargs=dict(value=3.14), broadcasts_input=broadcasts_input))
return tuple(sample_inputs)
def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):
test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),
((1,), (S, S, S), (S, S, M), 1, 1, True),
((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
((), (S, S, S), (S, S, M), 1, 1, True),
((), (S, S, S), (S, S, M), 0.6, 0.2, True),
]
sample_inputs = []
for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:
args = (make_tensor(input_shape, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(batch1_shape, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(batch2_shape, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad))
sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input))
if dtype.is_complex:
sample_inputs.append(SampleInput(
args[0].clone().requires_grad_(requires_grad),
args=(args[1].clone().requires_grad_(requires_grad),
args[2].clone().requires_grad_(requires_grad)),
kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),
broadcasts_input=broadcasts_input))
if dtype.is_complex:
shapes = [(S, S, S), (S, M, S), (S, S, M)]
args = (make_tensor(shapes[0], dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(shapes[1], dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(shapes[2], dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad))
sample_inputs.append(
SampleInput(
args[0].transpose_(-1, 1),
args=(args[1].transpose(-1, 1).conj().requires_grad_(requires_grad),
args[2].transpose(-1, 1).conj().requires_grad_(requires_grad)),
kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),))
return tuple(sample_inputs)
# TODO: add reduction kwargs
def sample_inputs_multilabel_soft_margin_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shapes = (
(S,),
(S, S),
)
for shape in shapes:
# Produce one with weight and one without.
yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={})
yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),),
kwargs={'weight': _make_tensor(shape, requires_grad=False)})
def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs):
yield SampleInput(
make_tensor((S, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)))
yield SampleInput(
make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)),
broadcasts_input=True)
if dtype.is_complex:
alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j
elif dtype.is_floating_point:
alpha, beta = 0.2, 0.6
else:
alpha, beta = 2, 3
yield SampleInput(
make_tensor((S, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)),
kwargs=dict(beta=beta, alpha=alpha))
yield SampleInput(
make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)),
kwargs=dict(beta=beta, alpha=alpha),
broadcasts_input=True)
# These samples fail gradcheck
if dtype.is_floating_point and not requires_grad:
yield SampleInput(
torch.tensor([[math.nan]], device=device, requires_grad=requires_grad),
args=(
torch.tensor([0.0], device=device, requires_grad=requires_grad),
torch.tensor([0.0], device=device, requires_grad=requires_grad),
),
kwargs=dict(beta=0.0, alpha=0.0),
broadcasts_input=True)
yield SampleInput(
torch.tensor([[0.0]], device=device, requires_grad=requires_grad),
args=(
torch.tensor([math.nan], device=device, requires_grad=requires_grad),
torch.tensor([math.nan], device=device, requires_grad=requires_grad),
),
kwargs=dict(beta=0.0, alpha=0.0),
broadcasts_input=True)
def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = ((), (S, S, S), (S,))
for shape in cases:
        yield SampleInput(make_arg(shape))
# TODO: add reduction kwargs
def sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False)
inputs = (
((), make_target([], low=0, high=1), {}),
((S,), make_target([], low=0, high=S), {"p": 1}),
((S,), make_target([1], low=0, high=S), {"p": 2}),
((S, M), make_target([S], low=0, high=M), {"margin": 1.0}),
((M, S), make_target([M], low=0, high=S), {"weight": None}),
)
for input_shape, target, kwargs in inputs:
yield SampleInput(_make_tensor(input_shape), args=(target,), kwargs=kwargs)
def sample_inputs_logsumexp(self, device, dtype, requires_grad, **kwargs):
inputs = (
((), (0,), True),
((S, S), (1,), True),
((S, S), (1,), False),
((S, S), (-2,), False),
((S, S), (0, 1), False),
)
samples = []
# Test large inputs to check numerical stability
lows = (None, 1e3, 1e6) if dtype in (torch.float32, torch.float64) else (None,)
for low in lows:
high = low * 2 if low is not None else None
for shape, dim, keepdim in inputs:
t = make_tensor(shape, dtype=dtype, device=device,
low=low, high=high,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(dim, keepdim)))
return tuple(samples)
def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):
inputs = [
((), {}),
((S, S), {}),
((0, S, 0), {}),
((S,), {'dtype': dtype, 'device': device}),
# Hard-code some dtypes/devices. We want to test cases where the
# (dtype, device) is different from the input's (dtype, device)
((S,), {'dtype': torch.double}),
((S,), {'device': 'cpu'}),
((S,), {'dtype': torch.double, 'device': 'cpu'}),
]
if torch.cuda.is_available():
inputs.append(((S,), {'device': 'cuda'}))
samples = []
for shape, kwargs in inputs:
t = make_tensor(shape, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, kwargs=kwargs))
return tuple(samples)
def reference_inputs_like_fns(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_like_fns(op, device, dtype, requires_grad, **kwargs)
# shape
cases = (
(), (0,), (1, 0), (1, 1, 4, 5), (5, 3, 0, 1), (1, 4, 3, 1, 1)
)
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
for shape in cases:
yield SampleInput(make_arg(shape))
yield SampleInput(make_arg(shape).transpose(0, -1))
yield SampleInput(make_arg(shape, noncontiguous=True))
yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1))
# TODO: add reduction kwargs
def sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False)
inputs = (
([], make_target([], low=0, high=1)),
([S], make_target([S], low=0, high=S)),
([M, S], make_target([M, S], low=0, high=S)),
)
for shape, target in inputs:
yield SampleInput(_make_tensor(shape), args=(target,))
def get_independent_tensor(tensor):
return tensor.clone().requires_grad_(tensor.requires_grad)
def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs):
samples = []
low = 2
high = 10
for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):
# With high
samples.append(SampleInput(
sample.input,
args=(high,) + sample.args,
kwargs=sample.kwargs))
# With low and high
samples.append(SampleInput(
get_independent_tensor(sample.input),
args=(low, high,) + sample.args,
kwargs=sample.kwargs))
return tuple(samples)
def sample_inputs_margin_ranking_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shapes = (
(),
(S,),
(S, S),
(S, S, S),
)
margins = (0., 1.)
reductions = ('sum', 'mean', 'none')
for shape in shapes:
for margin, reduction in product(margins, reductions):
kwargs = {'margin': margin, 'reduction': reduction}
yield SampleInput(_make_tensor(shape),
args=(_make_tensor(shape, requires_grad=False),
_make_tensor(shape, requires_grad=False)),
kwargs=kwargs)
def reference_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs)
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
for reduction in ('sum', 'mean', 'none'):
if dtype.is_floating_point: # only supports ints and floats
# NaN propagation
inp1 = make_input((10, ))
inp1[2] = float('nan')
inp2 = make_input((10, ))
inp2[4] = float('nan')
target = make_input((10, ))
            target[9] = float('nan')
yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})
# Inf handling
inp1 = make_input((10, ))
            inp1[1] = float('inf')
inp2 = make_input((10, ))
inp2[4] = float('inf')
target = make_input((10, ))
            target[7] = float('inf')
yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})
# Broadcasting
inp1 = make_input((5, 2))
inp2 = make_input((5, 1))
target = make_input((1, 2))
yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})
def error_inputs_margin_ranking_loss(op, device, **kwargs):
make_input = partial(make_tensor, device=device, dtype=torch.float32)
# invalid reduction value.
yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5, 4),), kwargs={'reduction': 'abc'}),
error_type=ValueError, error_regex='is not a valid value')
# invalid input shapes
yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5,),)),
error_regex='margin_ranking_loss : All input tensors should')
def sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):
inputs = [
((), (), {}),
((S, S), (2, 0), {}),
((0, S, 0), (3, 2, 2), {}),
((S,), (2, 3), {'dtype': dtype, 'device': device}),
# Hard-code some dtypes/devices. We want to test cases where the
# (dtype, device) is different from the input's (dtype, device)
((S,), (10,), {'dtype': torch.double}),
((S,), (1, 1, 12), {'device': 'cpu'}),
((S,), (2, 2, 2), {'dtype': torch.double, 'device': 'cpu'}),
]
if torch.cuda.is_available():
inputs.append(((S,), (7, 2), {'device': 'cuda'}))
samples = []
for input_shape, output_shape, kwargs in inputs:
t = make_tensor(input_shape, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(output_shape,), kwargs=kwargs))
return tuple(samples)
def sample_inputs_empty(op, device, dtype, requires_grad, **kwargs):
# shape
cases = (
(), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1),
)
for case in cases:
_kwargs = {'device': device, 'dtype': dtype, 'requires_grad': requires_grad}
yield SampleInput(case, args=(), kwargs=_kwargs)
def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs):
def get_val(dtype):
return make_tensor([], dtype=dtype, device="cpu").item()
samples = []
for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):
# The scalar we are passing to new_full must be the same dtype
# as the one of the resulting tensor
use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype
samples.append(SampleInput(
sample.input, args=sample.args + (get_val(use_dtype),), kwargs=sample.kwargs))
return tuple(samples)
def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs):
def get_val(dtype):
return make_tensor([], dtype=dtype, device="cpu").item()
inputs = [
((), get_val(dtype), {}),
((S, S), get_val(dtype), {}),
((0, S, 0), get_val(dtype), {}),
((S,), get_val(dtype), {'dtype': dtype, 'device': device}),
# Hard-code some dtypes/devices. We want to test cases where the
# (dtype, device) is different from the input's (dtype, device)
((S,), get_val(torch.double), {'dtype': torch.double}),
((S,), get_val(dtype), {'device': 'cpu'}),
((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}),
]
if torch.cuda.is_available():
inputs.append(((S,), get_val(dtype), {'device': 'cuda'}))
samples = []
for shape, fill_value, kwargs in inputs:
t = make_tensor(shape, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(fill_value,), kwargs=kwargs))
return tuple(samples)
def sample_inputs_multinomial(self, device, dtype, requires_grad, **kwargs):
cases = [
([3], 3, dict()),
([10], 3, dict()),
([3, 10], 3, dict()),
([3], 3, dict(replacement=False)),
([3], 3, dict(replacement=True)),
([3, 4], 4, dict(replacement=True)),
([3, 4], 4, dict(replacement=False)),
]
samples = []
for shape, num_samples, kwargs in cases:
t = make_tensor(shape, dtype=dtype, device=device,
low=0, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(num_samples,), kwargs=kwargs))
return tuple(samples)
def sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs):
def get_value_or_make_tensor(value_or_shape):
if isinstance(value_or_shape, list):
return make_tensor(value_or_shape, dtype=dtype, device=device,
low=0, high=None,
requires_grad=requires_grad)
return value_or_shape
samples = []
for value_or_mean_shape, value_or_std_shape, kwargs in cases:
mean = get_value_or_make_tensor(value_or_mean_shape)
std = get_value_or_make_tensor(value_or_std_shape)
samples.append(SampleInput(mean, args=(std,), kwargs=kwargs))
return tuple(samples)
def sample_inputs_normal_tensor_first(self, device, dtype, requires_grad, **kwargs):
# value_or_size, value_or_size, kwargs
cases = [
([], [], {}),
([3], [3], {}),
([3, 4, 2], [3, 4, 2], {}),
([2, 3], 1.1, {}),
([1, 2, 3], [5, 2, 3], {}), # broadcasting
]
return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs)
def sample_inputs_normal_tensor_second(self, device, dtype, requires_grad, **kwargs):
cases = [
([3, 4], 0.3, {}),
]
return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs)
def sample_inputs_bernoulli(self, device, dtype, requires_grad, **kwargs):
shapes = [
[3],
[],
[0, 3],
[2, 3, 4],
]
samples = []
for shape in shapes:
t = make_tensor(shape, dtype=dtype, device=device,
low=0, high=1,
requires_grad=requires_grad)
samples.append(SampleInput(t))
return tuple(samples)
def sample_inputs_logcumsumexp(self, device, dtype, requires_grad, **kwargs):
inputs = (
((S, S, S), 0),
((S, S, S), 1),
((), 0),
)
samples = []
for large_number in (True, False):
for shape, dim in inputs:
t = make_tensor(shape, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad)
if large_number and t.dim() > 0:
t[0] = 10000
samples.append(SampleInput(t, args=(dim,)))
return tuple(samples)
def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):
return (SampleInput((make_tensor((S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad))),)
def error_inputs_trace(op, device):
yield ErrorInput(SampleInput(make_tensor((3, 4, 5), dtype=torch.float32, device=device)), error_regex="expected a matrix")
def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (2, 1, 0.5)),
((S, S, S), (2, -1, 0.5)),
((S, S, S), (1, 2, 3)),
((S, S, S), (float('inf'), 2, 0.5)),
)
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((1, 2, 3), (-1, -2)),
((1, 2, 3), (-1, 2)),
((1, 2, 3), (1, -2)),
((1, 2, 3), (1, 2)),
((), (0, 0)),
((1, ), (0, 0)),
((M, M), (0, 1)),
((S, S, S), (2, 0)), )
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
def _numpy_ref_transpose(a, dim0, dim1):
if a.ndim <= 1:
return a
return np.swapaxes(a, dim0, dim1)
def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((1, 2, 3), (), (M, M), (S, S, S), (S, M, S), (M, S, M, S))
return (SampleInput(make_arg(shape)) for shape in shapes)
def sample_inputs_T(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((), (M, M))
return (SampleInput(make_arg(shape)) for shape in shapes)
def sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates invertible inputs for linear algebra ops
The input is generated as the itertools.product of 'batches' and 'ns'.
In total this function generates 8 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
make_fn = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad)
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 0]
for batch, n in product(batches, ns):
yield SampleInput(make_arg(*batch, n, n))
def sample_inputs_linalg_pinv_singular(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to
test the backward method of `linalg_pinv`. That way we always preserve the rank of the
input no matter the perturbations applied to it by the gradcheck.
Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood.
"""
batches = [(), (0, ), (2, ), (1, 1)]
    # a size of at least 30 is required to trigger failures in the previous implicit
    # implementation of pinv's backward method, albeit at the cost of speed
size = [0, 3, 50]
for batch, m, n in product(batches, size, size):
for k in range(min(3, min(m, n))):
# Note that by making the columns of `a` and `b` orthonormal we make sure that
# the product matrix `a @ b.t()` has condition number 1 when restricted to its image
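            # (.qr().Q gives factors with orthonormal columns; gradcheck then perturbs
            #  these factors rather than the low-rank product itself)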
a = torch.rand(*batch, m, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)
b = torch.rand(*batch, n, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)
yield SampleInput(a, args=(b,))
def sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function produces two tensors of shape (*, m, k) and (*, n, k) with k <= min(m, n).
    Their matrix product can be used to generate a tensor of shape (*, m, n) of rank k.
"""
batches = [(), (0, ), (2, ), (1, 1)]
size = [1, 5, 10]
for batch, m, n in product(batches, size, size):
for k in range(min(3, min(m, n))):
a = make_tensor((*batch, m, k), dtype=dtype, device=device, requires_grad=requires_grad)
b = make_tensor((*batch, n, k), dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(a, args=(b,), kwargs=kwargs)
def clone_sample(sample, **kwargs):
"""
Given a SampleInput, this function analyzes its input, args and kwargs,
and produces a copy with each non-Tensor entry being copied by reference,
    and with each Tensor entry cloned with `t.detach().clone().requires_grad_(t.requires_grad)`
"""
def clone_tensor(t):
if isinstance(t, torch.Tensor):
return t.detach().clone().requires_grad_(t.requires_grad)
else:
return t
sample_kwargs = kwargs if kwargs else sample.kwargs
return SampleInput(
clone_tensor(sample.input),
args=tuple(map(clone_tensor, sample.args)),
kwargs=dict(((k, clone_tensor(v)) for k, v in sample_kwargs.items()))
)
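# Example usage (illustrative): clone_sample(sample, q=2, M=None) returns an
# independent copy of `sample` whose kwargs are replaced by dict(q=2, M=None),
# as done in sample_inputs_svd_lowrank below.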
def sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs):
for sample in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad, **kwargs):
*batch, m, k = sample.input.shape
*_, n, _ = sample.args[0].shape
# NOTE: since svd_lowrank relies on non rank-revealing SVD,
# it inherits the problem of unstable behavior with repeated
# singular values including zeros.
# Since we want to avoid (repeated) zeros as singular values,
# we can only use k for q.
        # This issue could be resolved by using a rank-revealing SVD
# which does not include "zero" singular values.
op_kwargs = {
'q': k,
'M': None
}
# without M specified
yield clone_sample(sample, **op_kwargs)
# now with M
# TODO: fix bug in the documentation for svd_lowrank:
# M has to be (*, m, n), and not (*, 1, n) as written
# in the documentation
op_kwargs['M'] = make_tensor((*batch, m, n), dtype=dtype, device=device, requires_grad=requires_grad)
yield clone_sample(sample, **op_kwargs)
def chunk_iter(iterable, size):
it = iter(iterable)
while True:
chunk = tuple(islice(it, size))
if not chunk:
break
yield chunk
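# Example (illustrative): list(chunk_iter(range(5), 2)) == [(0, 1), (2, 3), (4,)]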
def sample_inputs_pca_lowrank(op_info, device, dtype, requires_grad=False, **kwargs):
    # we reuse samples from svd_lowrank which come in groups of two with
# kwarg['M'] = None and with kwarg['M'] = <some tensor>
samples = sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad, **kwargs)
for s1, s2 in chunk_iter(samples, 2):
del s1.kwargs['M']
del s2.kwargs['M']
s1.kwargs['center'] = False
s2.kwargs['center'] = True
yield s1
yield s2
def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    # autograd is not supported for inputs with zero elements
shapes = ((S, S),
(2, S, S),
(2, 1, S, S), )
for shape in shapes:
yield SampleInput(make_arg(shape))
def sample_inputs_linalg_vander(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((),
(1,),
(S,),
(2, S),)
for shape in shapes:
if len(shape) > 0 and shape[-1] > 1:
yield SampleInput(make_arg(shape))
n = shape[-1] if len(shape) > 0 else 1
for i in range(3):
# n-1, n, n+1
N = n + i - 1
if N < 2:
continue
yield SampleInput(make_arg(shape), kwargs=dict(N=N))
def np_vander_batched(x, N=None):
# Wrapper around np.vander that supports batches of 1 dimension (enough for the tests)
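    # Illustrative example: np_vander_batched(np.array([1., 2.]), N=3) returns
    # [[1., 1., 1.], [1., 2., 4.]] (columns are x**0, x**1, ..., x**(N-1) since increasing=True)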
if x.ndim == 0:
x = x[np.newaxis]
if x.ndim == 1:
y = np.vander(x, N=N, increasing=True)
return y
else:
if N is None:
N = x.shape[-1]
y = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N))
return y
def np_sinc_with_fp16_as_fp32(x):
# Wraps numpy's sinc function so that fp16 values are promoted to fp32
# before sinc is invoked. Context: numpy's sinc returns NaN when evaluated
# at 0 for fp16.
if x.dtype == np.float16:
return np.sinc(x.astype(np.float32))
else:
return np.sinc(x)
def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((S, 1, 1), (S, S, S)),
((S, 1, S), (S, S, S)),
((S, 1), (S, S, S)),
((1,), (S, S, S)),
((1, S), (1, 1, S)),
((), ()),
((), (1, 3, 2)),
)
return tuple(
SampleInput(
make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(shape,)) for size, shape in test_cases)
def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),)
samples: List[SampleInput] = []
for shape, *other_shapes in test_cases:
samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))
return samples
def reference_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs)
m = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
n = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)
cases = (
((), (1, 1), (1, 1, 7, 1), (3, 1, 1)),
((3, 5, 6), (1, 3, 5, 6), (1, 1, 1, 1, 6), (8, 3, 5, 6))
)
for a, b, c, d in cases:
yield SampleInput(m(a), args=(m(b), m(c), m(d)))
yield SampleInput(n(a), args=(n(b), n(c), n(d)))
def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases: Tuple[tuple] = (
((1, S), (2, S), (3, S),),
((S, 1), (S, 2), (S, 3),),
((1,), (2,), (3,),),
((2, S), (S,))
)
samples: List[SampleInput] = []
for shape, *other_shapes in test_cases:
samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))
# We also want to test mixed complex-non-complex inputs to block_diag
if dtype == torch.complex32 or dtype == torch.complex64:
non_complex_dtype = torch.float32 if dtype == torch.complex32 else torch.float64
make_arg_non_complex = partial(make_tensor, dtype=non_complex_dtype, device=device, requires_grad=requires_grad)
samples.append(SampleInput(make_arg_non_complex(shape), args=tuple(make_arg(s) for s in other_shapes)))
return samples
def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):
small_S = 2
test_cases = (
((S, S, 2), (S, S + 1, 2)),
((S, S), (S, S)),
((S, S, S), (S, S, S)),
((3, 5), (3, 5)),
((2, 3, 5), (2, 3, 5)),
((1, 2, 3), (1, 2, 3)),
((1, 1), (S, 1)),
((0, 5), (4, 5)),
((4, 5), (0, 5)),
((0, 4, 5), (3, 5)),
((4, 5), (0, 3, 5)),
((0, 4, 5), (1, 3, 5)),
((1, 4, 5), (0, 3, 5)),
# Using S here would make this one test take 9s
((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),
((small_S, 1, 1, small_S), (1, small_S, small_S)),
((1, 1, small_S), (small_S, 1, small_S, small_S)),
)
samples = []
for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
# FIXME add an override for JIT and revert 0. back to 0
# since it's accepted by eager
for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]:
for t1_size, t2_size in test_cases:
# The args should never be non-contiguous as this is not supported in the backward
samples.append(SampleInput(
make_tensor(t1_size, dtype=dtype, device=device, requires_grad=requires_grad),
args=(make_tensor(t2_size, dtype=dtype, device=device, requires_grad=requires_grad), p, cm)))
return samples
def sample_inputs_fill_(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
cases = (((S, S, S), (1,)),
((), (1,)),
((S, S, S), (make_arg(()),)))
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
def _fill_np(a, value):
a = a.copy()
a.fill(value)
return a
def _fill_aten(a, value):
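    # `a * False` produces a zero tensor that stays attached to `a`'s autograd graph
    # (with zero gradient); the in-place fill below runs under no_grad, so the filled
    # values do not enter the graph, mirroring the fact that fill's output does not
    # depend on the input's values.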
t = a * False
with torch.no_grad():
t.fill_(value)
return t
def _fill_sample_kwargs(device, dtype, input):
if dtype is torch.bool:
value = True
else:
value = 3
return ({'value': value}, {'value': value})
def sample_inputs_comparison_ops(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs)
# Adds a sample input where both tensors have the same values
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
lhs = make_arg((S, S))
yield SampleInput(lhs, args=(lhs.clone(),))
def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# shape x number of tensors
cases = (
((3, 4), 1),
((1, 2, 1, 4), 3),
((0, 1, 0), 2),)
for shape, num_tensors in cases:
tensors = []
for _ in range(num_tensors):
tensors.append(make_arg(shape))
for dim in range(-1, len(shape) - 1):
yield SampleInput(tensors, args=(dim,))
def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases: Tuple[tuple, tuple, dict] = ( # type: ignore[assignment]
((S, S), (S, S), {'dim': -1}),
((S, S), (S, S), {'dim': 1}),
((M, S), (S, S), {'dim': 0}), # different shapes
((1, 2, 3), (1, 2, 3), {'dim': -2}),
((0,), (0,), {'dim': 0}), # empty tensor
((0, S), (S, S), {'dim': 0}),
((1,), (1,), {}) # dim not passed, fallback to default
)
for input_shape1, input_shape2, kwargs in cases:
yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs)
def reference_inputs_cat(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_cat_concat(op, device, dtype, requires_grad, **kwargs)
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Noncontiguous type promoting tensors
a = make_arg((3, 4, 2))
b = make_arg((3, 2, 2), noncontiguous=True, dtype=torch.double)
c = make_arg((3, 3, 2), dtype=torch.float16).permute(1, 0, 2)
yield SampleInput((a, b, c), kwargs={'dim': 1})
# Special 1D tensor with dim length of 0 case
a = make_arg((0,))
b = make_arg((3, 2, 2))
yield SampleInput((a, b, a))
yield SampleInput((a, a, a))
def _elementwise_type_promo_np(*args, type_promotion_kind):
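    # Maps `args` through torch's elementwise dtype promotion (prims.utils.elementwise_dtypes)
    # and returns the corresponding numpy dtype.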
def _maybe_torch(x):
if isinstance(x, np.ndarray):
return torch.from_numpy(x)
return x
flattened = tree_flatten(args)[0]
transformed = tuple(_maybe_torch(a) for a in flattened)
result_dtype, _ = prims.utils.elementwise_dtypes(
*transformed,
type_promotion_kind=type_promotion_kind)
return torch_to_numpy_dtype_dict[result_dtype]
def _cat_np(input_seq, dim=0):
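    # NumPy reference for torch.cat: empty 1-D tensors are skipped (torch.cat ignores them),
    # and if nothing remains an empty array in the promoted dtype is returned.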
inputs = tuple(a for a in input_seq if not (a.ndim == 1 and a.size == 0))
if len(inputs) == 0:
np_dtype = _elementwise_type_promo_np(
input_seq,
type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH)
return np.empty(0, dtype=np_dtype)
return np.concatenate(inputs, axis=dim)
def _floor_divide_np(a, b):
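    # NumPy reference for torch.floor_divide: ndarray operands are first cast to the dtype
    # chosen by torch's default elementwise type promotion.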
dtype = _elementwise_type_promo_np(
a,
b,
type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
if isinstance(a, np.ndarray):
a = a.astype(dtype)
if isinstance(b, np.ndarray):
b = b.astype(dtype)
return np.floor_divide(a, b)
def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
tensor_shapes = (
        # the first tensor being 1-D is a special case for hstack
((S,), (S,), (S,)),
((S, S), (S, S), (S, S)),
)
for s1, s2, s3 in tensor_shapes:
tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3))
yield SampleInput(tensors)
def error_inputs_hstack_dstack_vstack(op, device):
make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False)
tensor_shapes = (
((S,), (S, S, S, S), (S,)),
)
for s1, s2, s3 in tensor_shapes:
tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3))
# Different dimension tensor
yield ErrorInput(SampleInput(tensors), error_regex="Tensors must have same number of dimensions")
# empty tensor list
yield ErrorInput(SampleInput(()), error_regex="expects a non-empty TensorList")
def sample_inputs_unbind(op_info, device, dtype, requires_grad, **kwargs):
    # Note: we don't do any tests where we unbind along 0-length dims
    # because in that case unbind returns an empty tuple, and that breaks
    # some assumptions in some backward tests in test_ops.py
shape_dims = (((S,), 0),
((S, S), 0),
((S, S), 1),
((S, S), -1),
((S, 0, S), 0),
((S, S, S), 1),
)
for shape, dim in shape_dims:
yield SampleInput(make_tensor(shape, dtype=dtype, device=device,
requires_grad=requires_grad),
args=(dim,))
def error_inputs_unbind(op_info, device):
make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False)
yield ErrorInput(SampleInput(make_arg(()), args=(0,)), error_type=IndexError,
error_regex="dimension specified as 0 but tensor has no dimensions")
yield ErrorInput(SampleInput(make_arg((2,)), args=(2,)), error_type=IndexError,
error_regex="Dimension out of range")
def reference_unbind(t, dim):
"""A numpy implementation of torch.unbind"""
return tuple(s.squeeze(dim) for s in np.split(t, t.shape[dim], dim))
def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(0, gather_variable((S, S), 1, M, True, device=device))),
SampleInput(
make_tensor((M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),
SampleInput(
make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
# Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006
SampleInput(
make_tensor((S,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([], dtype=torch.uint8, device=device))),
SampleInput(
make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
)
def _fill_indices(idx, dim, dim_size, elems_per_row, m, n, o):
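    # For every position along the non-`dim` dimensions, fills the `dim` slice of `idx`
    # with `elems_per_row` distinct indices sampled from range(dim_size).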
for i in range(1 if dim == 0 else m):
for j in range(1 if dim == 1 else n):
for k in range(1 if dim == 2 else o):
ii = [i, j, k]
ii[dim] = slice(0, idx.size(dim) + 1)
idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]
def error_inputs_gather(op_info, device, **kwargs):
# src is [1, 2]
# [3, 4]
src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
# idx is [0, 0]
# [1, 0]
idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
    # Index should be smaller than self except on dimension 1
bad_src = make_tensor((1, 1), device=device, dtype=torch.float32)
yield ErrorInput(SampleInput(bad_src, args=(1, idx,)),
error_regex="Size does not match at dimension 0")
# Index must have long dtype
bad_idx = idx.to(torch.int32)
yield ErrorInput(SampleInput(src, args=(1, bad_idx)),
error_regex="Expected dtype int64 for index")
# TODO: FIXME
# out.dtype must match src.dtype
# Creates new src & idx since SampleInputs can't share tensors
src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
out = torch.empty((2, 2), device=device, dtype=torch.float64)
yield ErrorInput(SampleInput(src, args=(1, idx), kwargs={'out': out}),
error_regex="Expected out tensor to have dtype")
# src and index tensors must have the same # of dimensions
# idx too few dimensions
src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
idx = torch.tensor((0, 0), device=device, dtype=torch.long)
yield ErrorInput(SampleInput(src, args=(1, idx)),
error_regex="Index tensor must have the same number of dimensions")
# src too few dimensions
src = torch.tensor((1, 2), device=device, dtype=torch.float32)
idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
yield ErrorInput(SampleInput(src, args=(0, idx)),
error_regex="Index tensor must have the same number of dimensions")
# index out of bounds
# NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices
if torch.device(device).type == 'cpu':
src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
idx = torch.tensor(((0, 23), (1, 0)), device=device, dtype=torch.long)
yield ErrorInput(SampleInput(src, args=(1, idx,)),
error_regex="index 23 is out of bounds for dimension")
x = torch.rand((1,), device=device).expand((3,))
src = torch.rand((6,), device=device)
ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)
yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=x)),
error_type=RuntimeError,
error_regex='unsupported operation')
yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=src)),
error_type=RuntimeError,
error_regex='unsupported operation')
yield ErrorInput(SampleInput(ind.clone(), args=(0, ind[1:],), kwargs=dict(out=ind[:1])),
error_type=RuntimeError,
error_regex='unsupported operation')
def error_inputs_take(op_info, device, **kwargs):
x = torch.rand((1,), device=device).expand((3,))
src = torch.rand((6,), device=device)
ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)
yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=x)),
error_type=RuntimeError,
error_regex='unsupported operation')
yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=src)),
error_type=RuntimeError,
error_regex='unsupported operation')
yield ErrorInput(SampleInput(ind.clone(), args=(ind[1:],), kwargs=dict(out=ind[:-1])),
error_type=RuntimeError,
error_regex='unsupported operation')
# Error inputs for scatter
def error_inputs_scatter_and_scatter_add(op_info, device, **kwargs):
# Error when self.dtype != src.dtype (and src is not a scalar)
src = make_tensor((2, 5), device=device, dtype=torch.float32)
idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long)
dst = torch.zeros((3, 5), device=device, dtype=torch.double)
yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
error_regex="Expected self.dtype to be equal to src.dtype")
# Index dtype must be long
src = make_tensor((2, 5), device=device, dtype=torch.float32)
idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.int32)
dst = torch.zeros((3, 5), device=device, dtype=torch.float32)
yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
error_regex="Expected dtype int64 for index")
# Index and destination must have the same number of dimensions
src = make_tensor((2, 5), device=device, dtype=torch.float32)
idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long)
dst = torch.zeros((3, 5, 3), device=device, dtype=torch.float32)
yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
error_regex="Index tensor must have the same number of dimensions as self tensor")
# Index and src must have the same number of dimensions when src is not a scalar
src = make_tensor((2, 5, 2), device=device, dtype=torch.float32)
idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long)
dst = torch.zeros((3, 5), device=device, dtype=torch.float32)
yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
error_regex="Index tensor must have the same number of dimensions as src tensor")
# Index out of bounds
# NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices
if torch.device(device).type == 'cpu':
src = make_tensor((2, 5), device=device, dtype=torch.float32)
idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long)
dst = torch.zeros((3, 5), device=device, dtype=torch.float32)
yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
error_regex="index 34 is out of bounds for dimension 0 with size 3")
def error_inputs_renorm(op_info, device, **kwargs):
zero_d = torch.randn((), device=device)
yield ErrorInput(SampleInput(zero_d, args=(0.5, 0, 1.0)), error_type=RuntimeError,
error_regex="needs at least 2 dimensions, got 0 dimensions")
def error_inputs_lstsq(op_info, device, **kwargs):
zero_d = torch.randn((), device=device)
yield ErrorInput(SampleInput(zero_d, args=(zero_d,)), error_type=RuntimeError,
error_regex="at least 2 dimensions")
def error_inputs_lstsq_grad_oriented(op_info, device, **kwargs):
zero_d = torch.randn((), device=device)
yield ErrorInput(SampleInput(zero_d, args=(zero_d, None)), error_type=RuntimeError,
error_regex="at least 2 dimensions")
def error_inputs_eig(op_info, device, **kwargs):
zero_d = torch.randn((), device=device)
yield ErrorInput(SampleInput(zero_d, args=(False,)), error_type=RuntimeError,
error_regex="input should be 2 dimensional")
yield ErrorInput(SampleInput(zero_d, args=(True,)), error_type=RuntimeError,
error_regex="input should be 2 dimensional")
def error_inputs_ormqr(op_info, device, **kwargs):
# this is only implemented on cpu
if (torch.device(device).type == 'cpu'):
zero_d = torch.randn((), device=device)
yield ErrorInput(SampleInput(zero_d, args=(zero_d, zero_d)), error_type=RuntimeError,
error_regex="input must have at least 2 dimensions")
def error_inputs_diag(op_info, device, **kwargs):
zero_d = torch.randn((), device=device)
yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError,
error_regex="matrix or a vector expected")
zero_d = torch.randn(1, 1, 1, device=device)
yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError,
error_regex="matrix or a vector expected")
def error_inputs_embedding(op_info, device, **kwargs):
indices = torch.rand(2, 2, device=device).long()
weights = [
torch.tensor(1.0, device=device),
torch.tensor(1.0, device=device).reshape(1, 1, 1),
]
for weight in weights:
yield ErrorInput(SampleInput(weight, args=(indices,)), error_type=RuntimeError,
error_regex="'weight' must be 2-D")
def error_inputs_t(op_info, device, **kwargs):
yield ErrorInput(
SampleInput(torch.randn(2, 3, 4, 5, device=device)),
error_regex="expects a tensor with <= 2",
)
def error_inputs_multinomial(op_info, device, **kwargs):
x = torch.empty(1, 2, 3, dtype=torch.double, device=device)
yield ErrorInput(SampleInput(x, args=(2,)), error_type=RuntimeError,
error_regex="prob_dist must be 1 or 2 dim")
x = torch.empty(1, 2, dtype=torch.long, device=device)
yield ErrorInput(SampleInput(x, args=(2,)), error_type=RuntimeError,
error_regex="multinomial only supports floating-point dtypes for input")
x = torch.empty(1, 2, dtype=torch.double, device=device)
y = torch.empty(1, 2, dtype=torch.double, device=device)
yield ErrorInput(SampleInput(x, args=(2,), kwargs=dict(out=y)), error_type=RuntimeError,
error_regex="multinomial expects Long tensor out")
x = torch.empty(2, dtype=torch.double, device=device)
yield ErrorInput(SampleInput(x, args=(0,)), error_type=RuntimeError,
error_regex="cannot sample n_sample <= 0 samples")
x = torch.empty(2, dtype=torch.double, device=device)
yield ErrorInput(SampleInput(x, args=(-1,)), error_type=RuntimeError,
error_regex="cannot sample n_sample <= 0 samples")
x = torch.empty(2, dtype=torch.double, device=device)
yield ErrorInput(SampleInput(x, args=(3, False,)), error_type=RuntimeError,
error_regex="cannot sample n_sample > prob_dist")
x = torch.empty(16777217, dtype=torch.double, device=device)
yield ErrorInput(SampleInput(x, args=(3,)), error_type=RuntimeError,
error_regex="number of categories cannot exceed")
def error_inputs_gradient(op_info, device, **kwargs):
for dtype in [torch.long, torch.float32, torch.complex64]:
t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype)
dim = (1, 0)
spacing = [0.1]
yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)),
error_type=RuntimeError,
error_regex='torch.gradient expected spacing to be unspecified, a scalar ')
yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=3)),
error_type=RuntimeError,
error_regex='torch.gradient only supports edge_order=1 and edge_order=2.')
dim = (1, 1)
spacing = 0.1
yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)),
error_type=RuntimeError,
error_regex='dim 1 appears multiple times in the list of dims')
dim = (0, 1)
coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')]
yield ErrorInput(SampleInput(t, kwargs=dict(spacing=coordinates, dim=dim, edge_order=1)),
error_type=RuntimeError,
error_regex='torch.gradient expected each tensor to be on the same device,')
yield ErrorInput(SampleInput(t, kwargs=dict(dim=3)),
error_type=IndexError, error_regex='')
t = torch.tensor([[1], [2], [3]])
yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=1)),
error_type=RuntimeError,
error_regex='torch.gradient expected each dimension size to be at least')
t = torch.tensor([[1, 2], [3, 4]])
yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=2)),
error_type=RuntimeError,
error_regex='torch.gradient expected each dimension size to be at least')
def error_inputs_masked_select(op_info, device, **kwargs):
x = torch.rand((1,), device=device).expand((3,))
y = torch.rand((6,), device=device)
mask = torch.tensor([True, False, True, True, False, False], device=device)
yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=x)),
error_type=RuntimeError,
error_regex='unsupported operation')
yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=y)),
error_type=RuntimeError,
error_regex='unsupported operation')
yield ErrorInput(SampleInput(mask.clone(), args=(mask,), kwargs=dict(out=mask)),
error_type=RuntimeError,
error_regex='unsupported operation')
def error_inputs_index_select(op_info, device, **kwargs):
x = torch.rand((1, 6), device=device).expand((2, 6))
y = torch.rand((3, 6), device=device)
ind = torch.tensor([0, 1], dtype=torch.int64, device=device)
yield ErrorInput(SampleInput(y, args=(1, ind,), kwargs=dict(out=x)),
error_type=RuntimeError,
error_regex='unsupported operation')
def error_inputs_logcumsumexp(op_info, device, **kwargs):
dim = 3
srcs = [torch.randn(5, 2, device=device), torch.randn(0, 2, device=device)]
for src in srcs:
yield ErrorInput(SampleInput(src, args=(dim,)),
error_type=IndexError,
error_regex='Dimension out of range')
def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):
return (SampleInput(make_tensor((S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S), 1, S, True, device=device), 0)),
# `indices` broadcast
SampleInput(make_tensor((S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),
# `self` broadcast
SampleInput(make_tensor((1, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),
# without `dim` arg
SampleInput(make_tensor((S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device), )),
SampleInput(make_tensor((S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device),)),
)
def error_inputs_aminmax_amax_amin(op_info, device, **kwargs):
    # Error Inputs for tensors with a zero-length dimension, when the 'dim' arg is not provided.
shape = (S, 0, S)
err_msg_amax_amin = "reduction"
err_msg_aminmax = "cannot compute aminmax over an empty dimension as the operation has no identity"
if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']:
yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_amax_amin)
elif op_info.name in ['aminmax']:
yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_aminmax)
    # Error Inputs for tensors with more than 64 dimensions
sizes = [1] * 65
err_msg1 = "only tensors with up to 64 dims are supported"
yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': -1}),
error_regex=err_msg1)
yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': 64}),
error_regex=err_msg1)
# Error Inputs for repeated 'dim'
if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']:
dims = [(0, 0), (0, -4)]
err_msg2 = "in the list of dims"
x = torch.randn(S, S, S, S, device=device)
for dim in dims:
yield ErrorInput(SampleInput(x, kwargs={'dim': dim}), error_regex=err_msg2)
# Error Input for illegal dtype
input5 = torch.randn(L, L, dtype=torch.float32, device=device)
max_values = torch.empty(L, dtype=torch.float32, device=device)
min_values = torch.empty(L, dtype=torch.double, device=device)
illegal_values = torch.empty(L, dtype=torch.int, device=device)
err_msg_amax_amin2 = "Expected the dtype for input and out to match"
err_msg_aminmax2 = "Expected out tensor to have dtype float, but got double instead"
if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']:
yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': illegal_values}),
error_regex=err_msg_amax_amin2)
elif op_info.name in ['aminmax']:
yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': (max_values, min_values)}),
error_regex=err_msg_aminmax2)
    # Error Inputs for reductions over a zero-length dimension, which must raise an error
err_msg3 = "reduction"
# FIXME: eager and ref impl throw different types of errors
error_type = IndexError if 'refs' not in op_info.name else RuntimeError
yield ErrorInput(SampleInput(torch.rand(shape, device=device), kwargs={'dim': 1}),
error_type=error_type, error_regex=err_msg3)
def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs):
test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment]
((S, S, S), {}),
((S, S, S), {'dim': 1}),
((S, S, S), {'dim': 1, 'keepdim': True}),
((), {'dim': 0}),
((), {}),
((), {'dim': 0, 'keepdim': True}),
)
samples: List[SampleInput] = []
for shape, kwargs in test_cases:
samples.append(SampleInput(
make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad),
kwargs=kwargs))
return samples
def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases = (
((1,), 0, None, None),
((S,), 0, None, None),
((S, 1), 0, None, None),
((S, 1), 1, None, None),
((S, S), 0, None, None),
((S, S), 1, None, None),
((S, S), 0, (1, S), (2, S)),
((S, S), 0, None, (2, S)),
((XS, XS, XS), 1, None, None),
((XS, XS, XS), 2, None, None),
((XS, XS, XS), 1, (XS, 1, XS), (XS, 1, XS)),
((XS, XS, XS), 2, (XS, XS, 1), (XS, XS, 1)),
((XS, XS, XS), 2, (XS, XS, XS), (XS, XS, XS)),)
sample_inputs = []
for size, dim, size_prepend, size_append in test_cases:
prepend_size = 0 if (size_prepend is None) else size_prepend[dim]
append_size = 0 if (size_append is None) else size_append[dim]
dim_size = size[dim] + prepend_size + append_size
for n in range(dim_size):
input_tensor = make_arg(size)
prepend = make_arg(size_prepend) if size_prepend else None
append = make_arg(size_append) if size_append else None
sample_inputs.append(SampleInput(input_tensor, args=(n, dim, prepend, append,)))
# add some samples with n > dim_size
sample_inputs.append(SampleInput(make_arg((XS, XS, XS)), args=(S + 1, 1,)))
sample_inputs.append(SampleInput(make_arg((XS, XS, XS)), args=(S * 3 + 2, 2, make_arg((XS, XS, XS)), make_arg((XS, XS, XS)),)))
return sample_inputs
def sample_inputs_histogram(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]):
input_tensor = make_arg(size)
weight_tensor = make_arg(size) if weighted else None
sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
kwargs=dict(weight=weight_tensor, density=density)))
bins_tensor = make_arg((bin_ct + 1,))
sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
kwargs=dict(weight=weight_tensor, density=density)))
return sample_inputs
def sample_inputs_histogramdd(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S))
bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3))
sample_inputs = []
for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]):
input_tensor = make_arg(size)
bin_ct = bin_ct_pattern[:size[-1]]
weight_tensor = make_arg(size[:-1]) if weighted else None
sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
kwargs=dict(weight=weight_tensor, density=density)))
bins_tensor = [make_arg(ct + 1) for ct in bin_ct]
sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
kwargs=dict(weight=weight_tensor, density=density)))
return sample_inputs
def sample_inputs_histc(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, min, max in product(sizes, [0, -10], [0, 10]):
# construct sample input omitting bins arg
sample_inputs.append(SampleInput(make_arg(size),
kwargs=dict(min=min, max=max)))
# construct sample inputs with a few different bins values
for bins in [1, 3, 10]:
sample_inputs.append(SampleInput(make_arg(size),
kwargs=dict(bins=bins, min=min, max=max)))
return sample_inputs
def sample_inputs_bincount(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sample_inputs = []
for size, weighted in product((S, M), [False, True]):
input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device)
weight_tensor = make_arg((size,)) if weighted else None
max_val = int(input_tensor.max().item())
for minlength in [0, max_val // 2, max_val, 2 * max_val]:
sample_inputs.append(SampleInput(input_tensor,
kwargs=dict(weights=weight_tensor, minlength=minlength)))
return sample_inputs
def sample_inputs_bucketize(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, out_int32, right in product(sizes, [False, True], [False, True]):
input_tensor = make_arg(size)
boundaries = make_arg((S,)).msort()
sample_inputs.append(SampleInput(input_tensor, args=(boundaries, ),
kwargs=dict(out_int32=out_int32, right=right)))
return sample_inputs
def sample_inputs_searchsorted(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((0,), (M,), (0, 0), (M, M), (0, 0, 0), (M, M, M))
inputs = []
for size, noncontiguous, out_int32, right in product(sizes, [False, True], [False, True], [False, True]):
unsorted_tensor = make_arg(size, noncontiguous=noncontiguous)
input_tensor = make_arg(size, noncontiguous=noncontiguous)
        if np.prod(size) == 0:
boundary_tensor = unsorted_tensor
sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous)
else:
boundary_tensor, sorter = torch.sort(unsorted_tensor)
side = "right" if right else "left"
inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right)))
inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side)))
inputs.append(
SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right, sorter=sorter)))
inputs.append(
SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side, sorter=sorter)))
return inputs
def sample_inputs_gradient(op_info, device, dtype, requires_grad, **kwargs):
sample_inputs = []
test_cases_float = (
((S,), None, None, 1),
((S,), 2., None, 1),
((S, S), None, None, 2),
((S, S), [2.0, 2.1], None, 1),
((S, S), [2.0, 2.1], (0, 1), 1),
((4, 4, 4), [2., 1.], (0, 1), 2),
)
for size, spacing, dim, edge_order in test_cases_float:
t = make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing, edge_order=edge_order)))
test_cases_tensor = (
((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),
((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),
)
for size, coordinates, dim, edge_order in test_cases_tensor:
t = make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
coordinates_tensor_list = []
for coords in coordinates:
            # `coords` will always contain floating point values, and Python 3.10 no longer
            # supports the implicit conversion to an integer via `__int__`
# TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed
a = torch.tensor(coords, device=device)
coordinates_tensor_list.append(a.to(dtype))
sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order)))
return tuple(sample_inputs)
def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_args = [
([1, 2],),
(slice(0, 3),),
([slice(0, 3), 1],),
([[0, 2, 3], [1, 3, 3], [0, 0, 2]],),
([[0, 0, 3], [1, 1, 3], [0, 0, 2]],),
([slice(None), slice(None), [0, 3]],),
([slice(None), [0, 3], slice(None)],),
([[0, 3], slice(None), slice(None)],),
([[0, 3], [1, 2], slice(None)],),
([[0, 3], ],),
([[0, 3], slice(None)],),
([[0, 3], Ellipsis],),
([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],),
(index_variable(2, S, device=device),),
(mask_not_all_zeros((S,)),),
]
for args in test_args:
yield SampleInput(make_arg((S, S, S)), args=args)
yield SampleInput(make_arg((S, S, S, S)), args=([slice(None), [0, 1], slice(None), [0, 1]],))
def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
inputs = []
for accumulate in [False, True]:
# Test with indices arg
inputs.append(SampleInput(
make_arg((S, S,)),
args=((index_variable(2, S, device=device),), make_arg((2, S))),
kwargs=dict(accumulate=accumulate)))
# Test with mask arg
mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))
inputs.append(SampleInput(
make_arg((S, S)),
args=((mask, ), make_arg((S,))),
kwargs=dict(accumulate=accumulate)))
return inputs
def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):
def small_3d_unique():
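        # unique values keep the sort order deterministic, which stabilizes gradient checks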
res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)
res = res.to(dtype).requires_grad_(requires_grad)
return res
def large_1d_unique():
res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
res = res.to(dtype).requires_grad_(requires_grad)
return res
samples = []
# Test case for large tensor.
samples.append(SampleInput(large_1d_unique()))
# Test cases for small 3d tensors.
# Imitates legacy tests from test/test_torch.py
dims = range(-3, 3)
flag = [True, False]
for dim, descending, stable in product(dims, flag, flag):
# default schema without stable sort
samples.append(SampleInput(small_3d_unique(),
args=(dim, descending)))
# schema with stable sort, no CUDA support yet
if torch.device(device).type == 'cpu':
samples.append(
SampleInput(small_3d_unique(),
kwargs=dict(dim=dim, descending=descending, stable=stable))
)
# Test cases for scalar tensor
samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad)))
samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),
args=(0,)))
samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),
args=(0, True)))
# Test cases for stable sort
samples.append(SampleInput(small_3d_unique(),
kwargs=dict(stable=True)))
samples.append(SampleInput(small_3d_unique(),
kwargs=dict(dim=0, stable=True)))
samples.append(SampleInput(small_3d_unique(),
kwargs=dict(dim=0, descending=True, stable=True)))
return samples
def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S))
samples = []
for x_size in sizes:
        # the threshold and value args must be numbers
samples.append(SampleInput(make_arg(x_size), args=(make_arg(()).item(), make_arg(()).item())))
return samples
def sample_inputs_argsort(*args, **kwargs):
return [sample_input for sample_input in sample_inputs_sort(*args, **kwargs) if "stable" not in sample_input.kwargs]
def sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs):
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for shape, sorted, return_inverse, return_counts, dim in \
product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]):
        # torch.unique cannot be called if the input tensor has a zero-length dimension which isn't the selected dim
        if 0 in shape and shape.index(0) != dim:
continue
# skip invalid dim args
if dim is not None and (dim < -len(shape) or dim >= len(shape)):
continue
kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
# construct a test case with only one distinct value
input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))
# construct a test case with mixed 0s and 1s
input_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\
.to(dtype).requires_grad_(requires_grad)
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))
# construct a test case with many different values
input_t = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))
return sample_inputs
def sample_inputs_unique_consecutive(*args, **kwargs):
for sample_input in sample_inputs_unique(*args, **kwargs):
if not sample_input.kwargs["sorted"]:
sample_input.kwargs.pop("sorted")
yield sample_input
def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((0, 8, 8), (5,)),
((3, 8, 8), 5),
((3, 8, 8), 1)
)
for input_shape, output_size in cases:
# Batched
yield SampleInput(make_arg(input_shape), args=(output_size,))
# Unbatched
yield SampleInput(make_arg(input_shape[1:]), args=(output_size,))
def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((1, 8, 8, 8), (5, 7)),
((2, 8, 8, 8), (None, 7)),
((1, 8, 4, 3), (5, None)),
((1, 8, 4, 3), (None, None)),
((1, 8, 4, 3), (5)),
)
for input_shape, output_size in cases:
# Batched
yield SampleInput(make_arg(input_shape), args=(output_size,))
# Unbatched
yield SampleInput(make_arg(input_shape[1:]), args=(output_size,))
def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((0, 8, 8, 8, 8), (5, 7, 4)),
((1, 8, 4, 3, 7), (None, None, None)),
((1, 8, 4, 3, 7), (1, 1, 1)),
((3, 3, 8, 8, 6), (5, 7, None)),
((1, 3, 8, 8, 6), (5, None, 2)),
((3, 3, 8, 8, 6), (None, 3, 2)),
)
for input_shape, output_size in cases:
# Batched
yield SampleInput(make_arg(input_shape), args=(output_size,))
# Unbatched
yield SampleInput(make_arg(input_shape[1:]), args=(output_size,))
def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
# ((0, 8, 8), (5,)),
# 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
((3, 4, 4), 3),
((3, 4, 4), 1)
)
for shapes, return_idx in product(cases, (True, False)):
# Batched
yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
# Unbatched
yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx))
def sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
# ((0, 8, 8, 8), (5, 7)),
# 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
((1, 4, 4, 4), (2, 3)),
((2, 4, 4, 4), (None, 3)),
((2, 4, 4, 4), (1, 1)),
((1, 4, 4, 3), (3, None)),
((1, 4, 4, 3), (None, None)),
((1, 4, 4, 3), (3)),
)
for shapes, return_idx in product(cases, (True, False)):
# Batched
yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
# Unbatched
yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx))
def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
# ((0, 8, 8, 8, 8), (5, 7, 4)),
# 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
((1, 4, 4, 3, 5), (None, None, None)),
((1, 4, 4, 3, 5), (1, 1, 1)),
((3, 3, 4, 4, 6), (2, 3, None)),
((1, 3, 4, 4, 6), (3, None, 2)),
((3, 3, 4, 4, 6), (None, 3, 2)),
)
for shapes, return_idx in product(cases, (True, False)):
# Batched
yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
# Unbatched
yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx))
class _TestParamsMaxPoolBase(object):
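    """Enumerates (shape, memory_format) and kwargs combinations for the max_pool sample inputs.

    Subclasses extend `self.kwargs` and `self.shapes` with the tuple-valued parameter
    variants and extra spatial dims appropriate to 1d/2d/3d pooling.
    """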
def __init__(self):
self.kwargs = {
'kernel_size': [3],
'stride': [2, None],
'ceil_mode': [True, False],
'padding': [0, 1],
'dilation': [1],
'return_indices': [True, False]
}
self.shapes = [
[1, 2, None], # batch
[2], # channels
[3, 6] # signal
]
def _gen_shape(self):
for shape in product(*self.shapes):
            # shape[0] being None indicates a missing batch dimension
if shape[0] is None:
shape = shape[1:]
yield shape, torch.contiguous_format
            # only 4-D (N, C, H, W) tensors, i.e. 2d pooling inputs, support the channels_last memory format
if len(self.shapes) == 4 and len(shape) == 4:
yield shape, torch.channels_last
def _gen_kwargs(self):
keys = self.kwargs.keys()
for values in product(*self.kwargs.values()):
yield dict(zip(keys, values))
def gen_input_params(self):
yield from product(self._gen_shape(), self._gen_kwargs())
class _TestParamsMaxPool1d(_TestParamsMaxPoolBase):
def __init__(self):
super().__init__()
self.kwargs['kernel_size'] += [(3,)]
self.kwargs['stride'] += [(2,)]
self.kwargs['padding'] += [(1,)]
self.kwargs['dilation'] += [(1,)]
class _TestParamsMaxPool2d(_TestParamsMaxPoolBase):
def __init__(self):
super().__init__()
self.kwargs['kernel_size'] += [(3, 2)]
self.kwargs['stride'] += [(2, 1)]
self.kwargs['padding'] += [(1, 1)]
self.kwargs['dilation'] += [(1, 2)]
self.shapes.append([6])
class _TestParamsMaxPool3d(_TestParamsMaxPoolBase):
def __init__(self):
super().__init__()
self.kwargs['kernel_size'] += [(3, 2, 3)]
self.kwargs['stride'] += [(2, 1, 2)]
self.kwargs['dilation'] += [(1, 2, 1)]
self.shapes.append([6])
self.shapes.append([5])
def sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
params_generator_type_dict = {
'nn.functional.max_pool1d': _TestParamsMaxPool1d,
'nn.functional.max_pool2d': _TestParamsMaxPool2d,
'nn.functional.max_pool3d': _TestParamsMaxPool3d,
}
params_generator = params_generator_type_dict[op_info.name]()
for (shape, memory_format), kwargs in params_generator.gen_input_params():
arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad)
yield SampleInput(arg, kwargs=kwargs)
def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad)
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((2, 1, 4, 5), {'p': 1., 'dim': 2}),
((2, 3, 4, 5), {'p': 2., 'dim': 1}),
((1, 2, 4, 5), {'p': 0.5, 'dim': 0}),
((1, 3, 4, 5), {'p': -1., 'dim': 1}),
((1, 3, 4, 5), {'p': 0., 'dim': -1}),
((), {'p': 1.2, 'dim': 0}),
((2, 3, 4, 5), {}),
((2, 3, 4, 5), {'eps': 1e-4}))
for input_shape, kwargs in cases:
yield SampleInput(make_arg(input_shape), kwargs=kwargs)
def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4), (3, 3, 3), (3,),
{'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}),
((2, 2, 4), (2, 2, 4), (4,),
{'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}),
((1, 1, 4), (1, 1, 4), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}),
((1, 1, 4), (1, 2, 3), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5), (4, 8, 3), None,
{})
)
for input_shape, weight, bias, kwargs in cases:
# Batched
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
# Unbatched
yield SampleInput(make_arg(input_shape[1:]), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4, 4), (3, 3, 3, 3), (3,),
{'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}),
((2, 2, 4, 4), (2, 2, 4, 5), (4,),
{'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}),
((1, 1, 4, 5), (1, 1, 4, 3), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 1, 4, 3), (1, 2, 3, 4), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5, 5), (4, 8, 3, 3), None,
{})
)
for input_shape, weight, bias, kwargs in cases:
# Batched
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
# Unbatched
yield SampleInput(make_arg(input_shape[1:]), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,),
{'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}),
((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,),
{'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}),
((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}),
((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None,
{})
)
for input_shape, weight, bias, kwargs in cases:
# Batched
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
# Unbatched
yield SampleInput(make_arg(input_shape[1:]), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias,
# and a dict of values of (stride, padding, dilation, groups)
cases: Tuple = (
((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}),
((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}),
((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}),
((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}),
# With defaults
((1, 4, 5), (3, 4, 3), None, {}),
)
# TODO: (@krshrimali), add error_inputs_func once https://github.com/pytorch/pytorch/pull/67354 is merged
# Should replace test_conv_modules_raise_error_on_incorrect_input_size and test_conv_shapecheck
# in test/test_nn.py
for input_shape, weight, bias, kwargs in cases:
# Batched
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
# Unbatched
yield SampleInput(make_arg(input_shape[1:]), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, groups, dilation)
cases: Tuple = (
((1, 3, 4, 4), (3, 3, 3, 3), (3,),
{'stride': (2, 2), 'padding': 2, 'groups': 1}),
((2, 4, 8, 8), (2, 2, 3, 3), (2,),
{'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 2, 4, 3), (4, 2, 3, 4), None,
{'stride': 2, 'padding': 1, 'groups': 1}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 2, 'padding': "valid"}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 1, 'padding': "same", 'dilation': 3}),
# Below are the group related samples from common_nn.py
((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}),
((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}),
((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}),
((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}),
((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}),
((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}),
((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}),
# With defaults
((1, 4, 5, 5), (3, 4, 3, 3), None, {}),
)
for input_shape, weight, bias, kwargs in cases:
# Batched
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
# Unbatched
yield SampleInput(make_arg(input_shape[1:]), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
def sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, num groups, and eps
cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment]
((1, 6, 3), 2, 0.5),
((2, 6, 3), 2, -0.5),
((1, 2), 1, None),
((0, 2), 1, None),
)
for input_shape, num_groups, eps in cases:
# Shape of weight and bias should be the same as num_channels
weight = make_arg(input_shape[1])
bias = make_arg(input_shape[1])
kwargs = {'weight': weight, 'bias': bias} if eps is None else {'weight': weight, 'bias': bias, 'eps': eps}
yield SampleInput(
make_arg(input_shape),
args=(num_groups,),
kwargs=kwargs
)
# Without any optional args
yield SampleInput(make_arg((1, 2)), args=(1,))
def sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
# Ordered as: input shape, kwargs for momentum, eps
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((S, S, S), {'momentum': 0.5, 'eps': 0.6}),
((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}),
((3, 2, 4), {'momentum': -1.2}),
((3, 2, 4), {'momentum': 0.0}),
((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
)
for input_shape, kwargs in cases:
        # args: running_mean, running_var, weight and bias must all have shape (channels,)
channels = input_shape[1]
weight = make_arg(channels)
bias = make_arg(channels)
running_mean = make_arg_without_requires_grad(channels, low=0)
running_var = make_arg_without_requires_grad(channels, low=0)
new_kwargs = {
'running_mean': running_mean,
'running_var': running_var,
'weight': weight,
'bias': bias,
**kwargs
}
yield SampleInput(
make_arg(input_shape),
args=(),
kwargs=new_kwargs
)
# Checking for permutations of weights and biases as `None`
# instance_norm assumes that if there's a bias, there's a weight
weights = [channels, None]
biases = [None, None]
for weight_channels, bias_channels in zip(weights, biases):
running_mean = make_arg_without_requires_grad(channels, low=0)
running_var = make_arg_without_requires_grad(channels, low=0)
yield SampleInput(
make_arg(input_shape),
args=(),
kwargs={
'running_mean': running_mean,
'running_var': running_var,
'weight': make_arg(weight_channels) if weight_channels is not None else None,
'bias': make_arg(bias_channels) if bias_channels is not None else None
}
)
# Test case for no optional kwargs
yield SampleInput(make_arg((1, 2, 3)), kwargs={})
def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, normalized_shape and a kwarg dict for eps
cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 2, 3), (1, 2, 3), {'eps': 0.5}),
((2, 2, 3), (2, 3), {'eps': -0.5}),
((1,), (1,), {}),
((1, 2), (2,), {}),
((0, 1), (1,), {}),
)
for input_shape, normalized_shape, kwargs in cases:
# Shape of weight and bias should be the same as normalized_shape
weight = make_arg(normalized_shape)
bias = make_arg(normalized_shape)
yield SampleInput(
make_arg(input_shape),
args=(normalized_shape, weight, bias),
kwargs=kwargs
)
# Without any optional args
yield SampleInput(make_arg((1, 2)), args=((2,),))
# TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs,
# enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400
# With weight and a `None` bias
# yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None))
# With `None` weight and bias (tests failing for this, see the link above)
# yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,))))
def sample_inputs_native_layer_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, normalized_shape, eps
cases: Tuple[Tuple[int], Tuple[int], float] = ( # type: ignore[assignment]
((1, 2, 3), (1, 2, 3), 0.5),
((2, 2, 3), (2, 3), -0.5),
((1,), (1,), 1e-5),
((1, 2), (2,), 1e-5),
((0, 1), (1,), 1e-5),
)
for input_shape, normalized_shape, eps in cases:
# Shape of weight and bias should be the same as normalized_shape
weight = make_arg(normalized_shape)
bias = make_arg(normalized_shape)
yield SampleInput(
make_arg(input_shape),
args=(normalized_shape, weight, bias, eps),
)
yield SampleInput(
make_arg(input_shape),
args=(normalized_shape, None, bias, eps),
)
yield SampleInput(
make_arg(input_shape),
args=(normalized_shape, weight, None, eps),
)
yield SampleInput(
make_arg(input_shape),
args=(normalized_shape, None, None, eps),
)
def error_inputs_native_layer_norm(opinfo, device, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False)
input_shape = (1, 2, 3)
err_msg1 = "Expected normalized_shape to be at least 1-dimensional"
s1 = SampleInput(
make_arg(input_shape), args=(tuple(), None, None, 1e-5)
)
yield ErrorInput(s1, error_regex=err_msg1)
normalized_shape = (1, 2, 3)
weight = make_arg((1, 2))
err_msg2 = "Expected weight to be of same shape as normalized_shape"
s2 = SampleInput(
make_arg(input_shape), args=(normalized_shape, weight, None, 1e-5)
)
yield ErrorInput(s2, error_regex=err_msg2)
bias = make_arg((1, 2))
err_msg3 = "Expected bias to be of same shape as normalized_shape"
s3 = SampleInput(
make_arg(input_shape), args=(normalized_shape, None, bias, 1e-5)
)
yield ErrorInput(s3, error_regex=err_msg3)
err_msg4 = "Given normalized_shape="
s4 = SampleInput(
make_arg((2, 2, 3)), args=((2, 2), None, None, 1e-5)
)
yield ErrorInput(s4, error_regex=err_msg4)
def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, size and a kwarg dict for alpha, beta, and k
cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}),
((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}),
((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}),
((1, 6, 3), 2, {'alpha': 3e-05}),
((1, 6, 3), 2, {'beta': 0.5}),
((1, 6, 3), 2, {'k': 1.25}),
((1, 6, 3), 2, {}),
((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
)
for input_shape, size, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs)
def sample_inputs_hardswish(self, device, dtype, requires_grad, **kwargs):
N = 5
    # Make sure we cover the -3 -> 3 range; the default is -10 -> 10, so this may be unnecessary.
tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]
return tensors
def sample_inputs_linear(self, device, dtype, requires_grad, **kwargs):
features_options = [[3, 4], [8, 8]]
batch_options: List[List[int]] = [
[], # no batch
[0],
[8],
[2, 3],
]
create_tensor = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-2, high=2)
sample_inputs = []
for has_bias, (in_feat, out_feat), batch_shape in \
itertools.product([True, False], features_options, batch_options):
input_tensor = create_tensor(batch_shape + [in_feat])
weight = create_tensor([out_feat, in_feat])
if not has_bias:
sample_inputs.append(SampleInput(input_tensor, args=(weight,)))
continue
bias = create_tensor([out_feat])
sample_inputs.append(SampleInput(input_tensor, args=(weight, bias)))
return sample_inputs
def sample_inputs_bilinear(self, device, dtype, requires_grad, **kwargs):
features_options = [[3, 4, 5], [8, 8, 8]]
batch_options: List[List[int]] = [
[], # no batch
[0],
[8],
[2, 3],
]
create_tensor = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-2, high=2)
sample_inputs = []
for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \
itertools.product([True, False], features_options, batch_options):
input_tensor1 = create_tensor(batch_shape + [in_feat1])
input_tensor2 = create_tensor(batch_shape + [in_feat2])
weight = create_tensor([out_feat, in_feat1, in_feat2])
if not has_bias:
sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight,)))
continue
bias = create_tensor([out_feat])
sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight, bias)))
return sample_inputs
def sample_inputs_glu(self, device, dtype, requires_grad, **kwargs):
features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]]
batch_options: List[List[int]] = [
[], # no batch
[0],
[8],
[2, 3],
]
create_tensor = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-2, high=2)
sample_inputs = []
for features, batch_shape in itertools.product(features_options, batch_options):
ndim = len(features) + len(batch_shape)
for dim in range(ndim):
input_tensor = create_tensor(batch_shape + features)
dim_size = input_tensor.size(dim)
if dim_size > 0 and dim_size % 2 == 0:
sample_inputs.append(SampleInput(input_tensor, args=(dim,)))
return sample_inputs
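# Why only positive, even sizes are kept above (illustrative sketch, not part of the
# test suite): F.glu halves the chosen dimension.
def _sketch_glu_halves_dim(device='cpu', dtype=torch.float32):
    x = make_tensor((2, 4, 6), device=device, dtype=dtype)
    out = torch.nn.functional.glu(x, dim=1)
    assert out.shape == (2, 2, 6)  # dim 1 went from 4 to 2
    return out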
def sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs):
N, C = 2, 3
D = 4
S = 3
L = 5
align_corners_options: Tuple[Any, ...] = (None,)
if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'):
align_corners_options = (True, False, None)
ranks_for_mode = {
'nearest': [1, 2, 3],
'linear': [1],
'bilinear': [2],
'bicubic': [2],
'trilinear': [3],
'area': [1, 2, 3]
}
def shape(size, rank, with_batch_channel=True):
if with_batch_channel:
return tuple([N, C] + ([size] * rank))
return tuple([size] * rank)
make_arg = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
sample_inputs = []
for align_corners in align_corners_options:
for rank in ranks_for_mode[mode]:
sample_inputs.extend([
SampleInput(make_arg(shape(D, rank)),
args=(shape(S, rank, False), None, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(shape(L, rank, False), None, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(None, 1.7, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(None, 0.6, mode, align_corners)),
])
return sample_inputs
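# Rough consumption sketch (illustrative only; None stands in for the unused op_info /
# self argument): the positional args above follow F.interpolate's
# (size, scale_factor, mode, align_corners) argument order.
def _sketch_interpolate_sample_usage(device='cpu', dtype=torch.float32):
    for sample in sample_inputs_interpolate('bilinear', None, device, dtype, requires_grad=False):
        torch.nn.functional.interpolate(sample.input, *sample.args, **sample.kwargs)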
def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs):
N, C = 2, 3
D = 4
S = 3
L = 5
ranks_for_mode = {
'nearest': [1, 2, 3],
'bilinear': [2],
}
def shape(size, rank, with_batch_channel=True):
if with_batch_channel:
return tuple([N, C] + ([size] * rank))
return tuple([size] * rank)
make_arg = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
sample_inputs = []
for rank in ranks_for_mode[mode]:
sample_inputs.extend([
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(size=shape(S, rank, False))),
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(size=shape(L, rank, False))),
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(scale_factor=1.7)),
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(scale_factor=0.6)),
])
return sample_inputs
def sample_inputs_gelu(self, device, dtype, requires_grad, **kwargs):
N = 5
tensors = []
for _ in range(1, N):
for approximate in ['none', 'tanh']:
tensors.append(SampleInput(
make_tensor((N * 2, N * 2), device=device, dtype=dtype,
requires_grad=requires_grad, low=-3, high=3),
kwargs=dict(approximate=approximate)))
return tensors
def error_inputs_gelu(op, device, **kwargs):
    # Tests that gelu errors out when passed an approximation it doesn't know.
yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device), kwargs={"approximate": "asdf"}),
error_regex="approximate argument must be either")
def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
args_for_reduction_with_dim = (
((S, S, S), (1,),),
((S, S, S), (1, True, ),),
((), (0,),),
((), (0, True,),),
)
inputs = list((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=args,))
for input_tensor, args in args_for_reduction_with_dim)
return inputs
def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
inputs.append(SampleInput(make_tensor((S, S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),))
inputs.append(SampleInput(make_tensor((), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),))
return inputs
def _generate_nan_reduction_inputs(device, dtype, requires_grad, **kwargs):
yield from _generate_reduction_inputs(device, dtype, requires_grad)
# NaN only exists for floating point numbers
if dtype.is_complex or dtype.is_floating_point:
yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad)
yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad)
def sample_inputs_nan_reduction(supports_multiple_dims):
# Generates sample inputs for reduction ops that contain the input tensor
# and dim and keepdim kwargs. If a reduction op needs to test additional
# args/kwargs then create a separate sample_inputs function
def fn(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
for t in _generate_nan_reduction_inputs(device, dtype, requires_grad):
# Add case without dim and keepdim kwargs
inputs.append(SampleInput(t.clone().requires_grad_(requires_grad)))
for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
kwargs=kwargs))
return inputs
return fn
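# Typical wiring for the factory above (hypothetical entry, shown only for illustration):
#   sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True)
# i.e. an OpInfo entry stores the returned `fn`, not the factory itself.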
def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad, **kwargs):
test_quantiles = (0.5, make_tensor((2,), dtype=dtype, device=device, low=0, high=1, requires_grad=requires_grad))
test_interpolations = ['linear', 'midpoint']
inputs = []
for quantiles in test_quantiles:
for t in _generate_reduction_inputs(device, dtype, requires_grad):
# Add case without dim and keepdim kwargs
inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
args=(quantiles,)))
for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):
# Interpolation kwarg for now is only supported when providing both dim and keepdim
kwargs.setdefault('dim', 0)
kwargs.setdefault('keepdim', False)
for interpolation in test_interpolations:
kwargs['interpolation'] = interpolation
inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
args=(quantiles,), kwargs=kwargs))
return inputs
def sample_inputs_reduction_count_nonzero(*args, **kwargs):
"""Sample inputs for count_nonzero"""
# count_nonzero does not support keepdim yet
for sample in sample_inputs_reduction(*args, **kwargs):
sample.kwargs.pop('keepdim', None)
yield sample
def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad, **kwargs):
N = 10
tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
requires_grad=requires_grad)) for _ in range(1, N)]
return tensors
def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size
cases = (((1, 3, 9, 9), 3),
((1, 3, 9, 9), (4, 4)),
((1, 3, 9, 9), (6, 6)),
((2, 3, 9, 9), (3, 3)),
((1, 1, 4, 4), (2, 2)),
((1, 2, 6, 6), (4, 4)))
samples = []
for input_shape, kernel_size in cases:
for return_indices in [False, True]:
# test case passing a single output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2), return_indices=return_indices)
))
# test case passing a tuple output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2, 3), return_indices=return_indices)
))
# test case passing an output ratio
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_ratio=(0.5, 0.5), return_indices=return_indices)
))
return samples
def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size
cases = (((2, 3, 5, 5, 5), (2, 2, 2)),
((1, 2, 6, 5, 4), 2),
((1, 2, 5, 6, 5), (2, 3, 2)),
((1, 2, 6, 6, 6), (2, 3, 2)),
((1, 1, 7, 6, 7), (2, 3, 4)),
((1, 1, 4, 5, 4), (2, 2, 1)),
((1, 1, 8, 7, 6), (4, 3, 2)),
((0, 1, 4, 5, 4), (2, 2, 1)))
samples = []
for input_shape, kernel_size in cases:
for return_indices in [False, True]:
# test case passing a single output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2), return_indices=return_indices)
))
# test case passing a tuple output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2, 3, 2), return_indices=return_indices)
))
# test case passing an output ratio
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_ratio=(0.5, 0.5, 0.5), return_indices=return_indices)
))
return samples
def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),
((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),
((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),
((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),
((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),
((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))
for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:
yield SampleInput(make_arg(input_shape),
args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override))
# Case with just input_shape and kernel_size
    yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3),))
def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, kwargs
cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
((2, 3, 9), (3,), dict()),
((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)),
((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)),
((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)),
((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)),
((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)),
((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)),
((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)),
((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)),
]
for input_shape, kernel_size, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)
def sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
((2, 3, 3, 4, 4), (2, 2, 2), dict()),
((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True,
count_include_pad=False, divisor_override=2)),
((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True,
count_include_pad=True, divisor_override=2)),
((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)),
((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False,
count_include_pad=False, divisor_override=2)),
((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=-2)),
((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True,
count_include_pad=True, divisor_override=None)),
((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None)),
]
for input_shape, kernel_size, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)
def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):
def get_tensor_input(size):
return make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad)
inputs = []
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1,)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))
return inputs
def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
arg_a = make_tensor((S,), dtype=dtype, device=device, requires_grad=requires_grad)
arg_b = make_tensor((M,), dtype=dtype, device=device, requires_grad=requires_grad)
inputs.append(SampleInput(arg_a, args=(arg_b,)))
return inputs
def sample_inputs_dist(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))
ps = (2, 4)
for size_x, size_y, p in product(sizes, sizes, ps):
yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))
# Still missing: a test for the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index(op_info, device, dtype, requires_grad, **kwargs):
# target.index_select(dim, idx)
select = op_info.name == "index_select"
# target.index_add(dim, idx, source, *, alpha=1)
add = op_info.name == "index_add"
# target.index_copy(dim, idx, source)
copy = op_info.name == "index_copy"
# target.index_fill(dim, idx, value)
fill = op_info.name == "index_fill"
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_permutation = partial(torch.randperm, device=device, dtype=torch.int64)
def make_idx(n):
return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=n)
shapes = [(), (1,), (S, S)]
# extra parameter for add
alphas = (-1, 0, 2) if add else (None,)
for shape, alpha in product(shapes, alphas):
t = make_arg(shape)
args = []
# dim. We handle the scalar case
dim = 1 if t.ndim == 2 else 0
args.append(dim)
        # idx. They need to be different for copy and add to be deterministic
make_idx_fn = make_permutation if copy or add else make_idx
idx = make_idx_fn(t.shape[dim] if t.ndim != 0 else 1)
args.append(idx)
# source
if copy or add:
args.append(make_arg(shape))
elif fill:
# A weird number to catch errors
args.append(make_arg((1,)).item())
args = tuple(args)
kwargs = {} if alpha is None else {"alpha": alpha}
yield SampleInput(t, args=args, kwargs=kwargs)
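# Consumption sketch (illustrative only): the generator above keys off op_info.name, so a
# stand-in namespace with just a `name` attribute (not a real OpInfo) is enough to drive it.
def _sketch_index_select_sample_usage(device='cpu', dtype=torch.float32):
    from types import SimpleNamespace
    fake_op = SimpleNamespace(name="index_select")
    for sample in sample_inputs_index(fake_op, device, dtype, requires_grad=False):
        torch.index_select(sample.input, *sample.args, **sample.kwargs)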
def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def make_idx(n, m):
return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m)
shapes = [((), ()), ((1,), (1,)), ((S, S), (S, M)), ((S, S, S), (S, M, S))]
include_selfs = (True, False)
reduces = ('prod', 'mean', 'amin', 'amax')
for shape, include_self, reduce in product(shapes, include_selfs, reduces):
self_shape, src_shape = shape
# dim. We handle the scalar case
dim = 1 if len(self_shape) >= 2 else 0
idx = make_idx(src_shape[dim] if len(src_shape) != 0 else 1,
self_shape[dim] if len(self_shape) != 0 else 1)
args = (dim, idx, make_arg(src_shape), reduce)
yield SampleInput(make_arg(self_shape),
args=args,
kwargs={'include_self' : include_self})
# Sample inputs to test edge cases for backward
if requires_grad:
# Check that gradients are propagated correctly for prod when zeros in self/src are reduced
# This sample tests gradients for the following cases
# (a) 1 zero reduced (from source (self[0, 1]), from self (self[0, 0]))
        # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0], self[1, 1]))
# (c) no zeros reduced (self[2, 1], self[2, 2])
# (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py
# test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad
input = torch.tensor([[0, 13], [0, 0], [15, 19]], dtype=dtype, device=device, requires_grad=requires_grad)
src = torch.tensor([[2, 0], [0, 0], [2, 3], [2, 2]], dtype=dtype, device=device, requires_grad=requires_grad)
idx = torch.tensor([0, 1, 2, 0], dtype=torch.long, device=device)
yield SampleInput(input,
args=(0, idx, src, 'prod'),
kwargs={'include_self': True})
def sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
args = (
((S, S, S), (),),
((S, S, S), (1, ),),
((S, S, S), (1, True, ),),
((), (),),
((), (0,),),
((), (0, True,),),
# Non-fused mode kernel on CUDA
((3000,), ()),
)
inputs = list((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=args,))
for input_tensor, args in args)
return inputs
# Still missing: a test for the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)
S = 3
# Generic inputs
idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]
idx_list = [idx, -idx - 1]
for idx, acc in product(idx_list, (True, False)):
yield SampleInput(input=make_arg((S, S)),
args=(idx.clone(),
make_arg((S,)),
acc))
# Scalar cases
scalar_sizes = [(), (1,)]
tgt_gen = (make_arg(size) for size in scalar_sizes)
idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
src_gen = (make_arg(size) for size in scalar_sizes)
for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):
yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
args=(idx.clone(),
src.clone().requires_grad_(requires_grad),
acc))
# Empty cases
tgt_sizes = [(0,), (), (1,), (3, 2)]
tgt_gen = (make_arg(size) for size in tgt_sizes)
idx = make_idx((0,), high=1)
src = make_arg((0,))
    for tgt, acc in product(tgt_gen, (True, False)):
yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
args=(idx.clone(),
src.clone().requires_grad_(requires_grad),
acc))
def sample_inputs_take(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)
S = 3
# Generic inputs: take S elements out of S * S
index = make_idx((S,), high=(S * S))
for idx in (index, -index - 1):
yield SampleInput(input=make_arg((S, S)), args=(idx,))
# Scalar cases
scalar_sizes = [(), (1,)]
src_gen = (make_arg(size) for size in scalar_sizes)
idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
for src, idx in product(src_gen, idx_gen):
yield SampleInput(input=src.clone().requires_grad_(requires_grad),
args=(idx.clone(),))
# Empty cases
src_sizes = [(0,), (), (1,), (3, 2)]
src_gen = (make_arg(size) for size in src_sizes)
idx = make_idx((0,), high=1)
for src in src_gen:
yield SampleInput(input=src.clone().requires_grad_(requires_grad),
args=(idx.clone(),))
def sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((4, 3, 2, 1), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=([0, 1, 2, 3], [3, 2, 1, 0])),
SampleInput(
make_tensor((4, 3, 2, 1), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=([0, -1, -2, -3], [-3, -2, -1, -0]))
)
def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)
shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))
if requires_grad:
# Tests for variant_consistency_jit, grad, gradgrad
# are slower. Use smaller bags of `rep_dims` and `shapes`
# in this case.
rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment]
shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment]
samples = []
for rep_dim, shape in product(rep_dims, shapes):
# `torch.repeat` errors for `len(rep_dims) < t.dim()`,
# so we filter such combinations.
if op_info.name == 'repeat' and len(rep_dim) < len(shape):
continue
samples.append(SampleInput(make_arg(shape), args=(rep_dim,),))
return samples
def sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_args = (
((S, S, S), (1, 2, 2)),
((S, S, S), (-1, 2, 2)),
((S, S, S), (1, 0, 0)),
((S, S, S), (-1, 0, 0)),
((S, S, S), (2, 1, 2)),
)
for shape, args in shapes_and_args:
tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
requires_grad=requires_grad)
yield SampleInput(tensor, args=args)
def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
y_shape_x_shape_and_kwargs = [
((2, 3), (2, 3), {}),
((2, 3), (2, 3), {'dim': 1}),
((6,), (6,), {}),
((6,), None, {}),
# When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #61619
# ((6,0), (6,0), {}),
((2, 3), (1, 3), {}),
((3, 3), (3, 3), {}),
((3, 3), (3, 3), {'dim': -2}),
((5,), None, {'dx': 2.0}),
((2, 2), None, {'dx': 3.0})
]
samples = []
for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
y_tensor = make_tensor(y_shape, dtype=dtype, device=device, low=None, high=None,
requires_grad=requires_grad)
if x_shape is not None:
x_tensor = make_tensor(x_shape, dtype=dtype, device=device, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
else:
samples.append(SampleInput(y_tensor, kwargs=kwarg))
return samples
def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
y_shape_x_shape_and_kwargs = [
((2, 3), (2, 3), {}),
((2, 3), (2, 3), {'dim': 1}),
((6,), (6,), {}),
((6,), None, {}),
# When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #61619
# ((6,0), (6,0), {}),
((2, 3), (1, 3), {}),
((3, 3), (3, 3), {}),
((3, 3), (3, 3), {'dim': -2}),
((5,), None, {'dx': 2.0}),
((2, 2), None, {'dx': 3.0})
]
samples = []
for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
y_tensor = make_tensor(y_shape, dtype=dtype, device=device, low=None, high=None,
requires_grad=requires_grad)
if x_shape is not None:
x_tensor = make_tensor(x_shape, dtype=dtype, device=device, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
else:
samples.append(SampleInput(y_tensor, kwargs=kwarg))
return samples
def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_axes = [
((3, 4, 5), 0),
((3, 4, 5), 1),
((3, 4, 5), 3),
((3, 4, 5), -1),
((3, 4, 5), -3),
((), 0),
((), -1),
((1,), 0),
((1,), -1),
]
samples = []
for shape, axis in shapes_and_axes:
tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(tensor, args=(axis,),))
return samples
def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs):
shapes = ((0, 1, 5, 5), (1, 1, 5, 5), (2, 3, 5, 5))
kernel_sizes = (2, (2, 2), (3, 3))
dilations = (1, 2, (1, 2))
paddings = (0, 1, (1, 1))
strides = (1, 2, (1, 2))
cases = product(shapes, kernel_sizes, dilations, paddings, strides)
for shape, kernel_size, dilation, padding, stride in cases:
tensor = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(tensor, args=(kernel_size, dilation, padding, stride))
# With default args
yield SampleInput(make_tensor((1, 1, 5, 5), dtype=dtype, device=device, requires_grad=requires_grad),
args=((3, 3),))
def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_args = (
((S, 1, S, 1), ()),
((1, 1, 1, 1), ()),
((S, 1, S, 1), (1,)),
((S, 1, S, 1), (-1,)),
((S, 1, S, 1), (2,)),
((S, 1, S, 1), (-2,)),
((), (0, )),
)
for shape, args in shapes_and_args:
tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
requires_grad=requires_grad)
yield SampleInput(tensor, args=args)
def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):
assert mode in ('constant', 'reflect', 'replicate', 'circular')
if mode in ['reflect', 'replicate']:
        cases: tuple = (  # type: ignore[assignment]
((1, 3), (1, 2)),
((1, 3), (0, 1)),
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((1, 3, 3), (0, 2, 0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
elif mode == 'constant':
cases = (
((1, 3), (1, 2)),
((1, 3), (0, 1)),
((1, 3), (0, 2, 0, 1)),
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((0, 3, 3), (0, 2, 0, 1)),
((0, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((1, 3, 3), (0, 2, 0, 1)),
((1, 3, 3), (1, 1, 1, 1, 1, 1)),
((0, 3, 3, 3), (1, 2)),
((0, 3, 3, 3), (0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((3, 3, 5, 5), (1, 2)),
((3, 3, 5, 5), (0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
((1, 3, 3, 3, 3), (1, 2)),
((1, 3, 3, 3, 3), (0, 1)),
((1, 3, 3, 3, 3), (0, 2, 0, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
else: # mode == 'circular'
if dtype == torch.bool:
            # test_dtypes fails on ASAN for this case with the following error:
            # runtime error: load of value 190, which is not a valid value for type 'bool'
# Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562
# Reference Issue: https://github.com/pytorch/pytorch/issues/63034
cases = (
((2, 3, 3), (1, 2)),
((1, 3, 3), (1, 2)),
)
else:
cases = (
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
if mode == 'constant':
# Default args
yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),))
if mode in ['reflect', 'replicate', 'circular']:
for shape, pad in cases:
yield SampleInput(make_inp(shape), args=(pad, mode))
else: # mode == 'constant'
for pad_value in (1., 2.):
for shape, pad in cases:
yield SampleInput(make_inp(shape), args=(pad, mode, pad_value))
def sample_inputs_constant_pad_nd(op_info, device, dtype, *args, **kwargs):
# Inherit sample inputs from nn.pad, but transform them to fit
# constant_pad_nd's interface
nn_samples = sample_inputs_nn_pad(op_info, device, dtype, *args,
mode='constant', **kwargs)
# NOTE: primTorch is more strict about the type of the fill value argument
# So we must cast it to the correct dtype
from torch._prims_common import dtype_to_type
scalar_type = dtype_to_type(dtype)
def drop_mode_argument(input, pad, mode=None, value=None):
if value is None:
return SampleInput(input, args=(pad,))
else:
return SampleInput(input, args=(pad, scalar_type(value)))
for sample in nn_samples:
yield drop_mode_argument(sample.input, *sample.args, **sample.kwargs)
def np_unary_ufunc_integer_promotion_wrapper(fn):
# Wrapper that passes PyTorch's default scalar
# type as an argument to the wrapped NumPy
# unary ufunc when given an integer input.
    # This mimics PyTorch's integer->floating point
# type promotion.
#
# This is necessary when NumPy promotes
# integer types to double, since PyTorch promotes
# integer types to the default scalar type.
# Helper to determine if promotion is needed
def is_integral(dtype):
return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]
@wraps(fn)
def wrapped_fn(x):
# As the default dtype can change, acquire it when function is called.
# NOTE: Promotion in PyTorch is from integer types to the default dtype
np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
if is_integral(x.dtype):
return fn(x.astype(np_dtype))
return fn(x)
return wrapped_fn
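# Example usage of the wrapper above (illustrative only; np_sin_ref is a made-up name):
# wrapping np.sin so that integer inputs are evaluated in PyTorch's default floating
# point dtype instead of NumPy's float64.
def _sketch_np_promotion_wrapper_usage():
    np_sin_ref = np_unary_ufunc_integer_promotion_wrapper(np.sin)
    return np_sin_ref(np.arange(3))  # computed in torch.get_default_dtype() (float32 by default)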
def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
SampleInput(make_input(()), kwargs=dict(repeats=2)),
SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2)),
SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2, dim=1)),
SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=torch.arange(3, device=device), dim=1))
]
def sample_inputs_stft(op_info, device, dtype, requires_grad, **kwargs):
def mt(shape, **kwargs):
return make_tensor(shape, device=device, dtype=dtype,
requires_grad=requires_grad, **kwargs)
yield SampleInput(mt(100), kwargs=dict(n_fft=10))
for center in [False, True]:
yield SampleInput(mt(10), kwargs=dict(n_fft=7, center=center))
yield SampleInput(mt((10, 100)), kwargs=dict(n_fft=16, hop_length=4, center=center))
window = make_tensor(16, low=.5, high=2.0, dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(
mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
yield SampleInput(
mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
if not dtype.is_complex:
yield SampleInput(
mt((10, 100)), kwargs=dict(n_fft=16, window=window, onesided=False))
def sample_inputs_istft(op_info, device, dtype, requires_grad, **kwargs):
def mt(shape, **kwargs):
real_shape = shape if dtype.is_complex else shape + (2,)
return make_tensor(real_shape, device=device, dtype=dtype,
requires_grad=requires_grad, **kwargs)
yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10))
yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False))
yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True))
for center in [False, True]:
yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center))
yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center))
window = make_tensor(10, low=.5, high=2.0, dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(mt((10, 10, 6)), kwargs=dict(
n_fft=10, window=window, center=center, return_complex=dtype.is_complex))
yield SampleInput(mt((10, 10, 10)), kwargs=dict(
n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True))
real_window = window if not dtype.is_complex else window.real
yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center))
def sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs):
def mt(shape, **kwargs):
return make_tensor(shape, device=device, dtype=dtype,
requires_grad=requires_grad, **kwargs)
yield SampleInput(mt((9, 10)))
yield SampleInput(mt((50,)), kwargs=dict(dim=0))
yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,)))
yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1)))
yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2)))
def sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
# Cholesky factorization is for positive-definite matrices
single_well_conditioned_matrix = random_well_conditioned_matrix(S, S, dtype=dtype, device=device)
batch_well_conditioned_matrices = random_well_conditioned_matrix(2, S, S, dtype=dtype, device=device)
single_pd = single_well_conditioned_matrix @ single_well_conditioned_matrix.mH
batch_pd = batch_well_conditioned_matrices @ batch_well_conditioned_matrices.mH
inputs = (
torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix
torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices
single_pd,
batch_pd
)
test_cases = (torch.linalg.cholesky(a, upper=False) for a in inputs)
for l in test_cases:
        # generate lower-triangular samples
l.requires_grad = requires_grad
yield SampleInput(l) # upper=False by default
yield SampleInput(l.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=False))
# generate upper-triangular inputs
u = l.detach().clone().mT.contiguous().requires_grad_(requires_grad)
yield SampleInput(u, kwargs=dict(upper=True))
def sample_inputs_linalg_ldl_factor(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import (
random_hermitian_pd_matrix,
random_symmetric_pd_matrix,
)
device = torch.device(device)
# Symmetric inputs
yield SampleInput(
random_symmetric_pd_matrix(S, dtype=dtype, device=device),
kwargs=dict(hermitian=False),
) # single matrix
yield SampleInput(
random_symmetric_pd_matrix(S, 2, dtype=dtype, device=device),
kwargs=dict(hermitian=False),
) # batch of matrices
yield SampleInput(
torch.zeros(0, 0, dtype=dtype, device=device), kwargs=dict(hermitian=False)
) # 0x0 matrix
yield SampleInput(
torch.zeros(0, 2, 2, dtype=dtype, device=device), kwargs=dict(hermitian=False)
) # zero batch of matrices
# Hermitian inputs
# hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+
magma_254_available = device.type == 'cuda' and _get_magma_version() >= (2, 5, 4)
if dtype.is_complex and (device.type == 'cpu' or magma_254_available):
yield SampleInput(
random_hermitian_pd_matrix(S, dtype=dtype, device=device),
kwargs=dict(hermitian=True),
) # single matrix
yield SampleInput(
random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),
kwargs=dict(hermitian=True),
) # batch of matrices
def sample_inputs_linalg_ldl_solve(op_info, device, dtype, requires_grad=False, **kwargs):
# Generate LDL factors of symmetric (and Hermitian on CPU) matrices
from torch.testing._internal.common_utils import random_hermitian_pd_matrix, random_symmetric_pd_matrix
device = torch.device(device)
symmetric_inputs = (
random_symmetric_pd_matrix(S, dtype=dtype, device=device), # single matrix
random_symmetric_pd_matrix(S, 2, dtype=dtype, device=device), # batch of matrices
torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix
torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices
)
hermitian_inputs = (
random_hermitian_pd_matrix(S, dtype=dtype, device=device),
random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),
) if device.type == 'cpu' and dtype.is_complex else ()
test_cases1 = (torch.linalg.ldl_factor_ex(a, hermitian=False) for a in symmetric_inputs)
test_cases2 = (torch.linalg.ldl_factor_ex(a, hermitian=True) for a in hermitian_inputs)
# Symmetric case
for test_case in test_cases1:
factors, pivots, _ = test_case
factors.requires_grad = requires_grad
for B_batch_shape in ((), factors.shape[:-2]):
B = make_tensor((*B_batch_shape, factors.shape[-1], S), device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=False))
clone_factors = factors.detach().clone().requires_grad_(requires_grad)
yield SampleInput(clone_factors, args=(pivots, B), kwargs=dict(hermitian=False))
# Hermitian case
for test_case in test_cases2:
factors, pivots, _ = test_case
factors.requires_grad = requires_grad
for B_batch_shape in ((), factors.shape[:-2]):
B = make_tensor((*B_batch_shape, factors.shape[-1], S), device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=True))
clone_factors = factors.detach().clone().requires_grad_(requires_grad)
yield SampleInput(clone_factors, args=(pivots, B), kwargs=dict(hermitian=True))
def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
device = torch.device(device)
drivers: Tuple[str, ...]
if device.type == 'cuda':
drivers = ('gels',)
else:
drivers = ('gels', 'gelsy', 'gelss', 'gelsd')
# we generate matrices of shape (..., n + delta, n)
deltas: Tuple[int, ...]
if device.type == 'cpu' or has_cusolver():
deltas = (-1, 0, +1)
    # only square systems if cuSOLVER is not available,
    # because we solve an lstsq problem with a transposed matrix in the backward pass
else:
deltas = (0,)
out = []
for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas):
shape = batch + (3 + delta, 3)
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
a.requires_grad_(requires_grad)
b = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
out.append(SampleInput(a, args=(b,), kwargs=dict(driver=driver)))
return out
def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):
"""
This function generates input for torch.linalg.householder_product (torch.orgqr).
The first argument should be a square matrix or batch of square matrices, the second argument is a vector or batch of vectors.
Empty, square, rectangular, batched square and batched rectangular input is generated.
"""
# Each column of the matrix is getting multiplied many times leading to very large values for
# the Jacobian matrix entries and making the finite-difference result of grad check less accurate.
# That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.
samples = (
SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((S + 1, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((2, 1, S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((2, 1, S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((2, 1, S + 1, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((2, 1, S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((0, 0), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(make_tensor((0,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),
SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((0,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),
# m = n = S, k = S - 2
SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S - 2,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),
# m = S, n = S -1, k = S - 2
SampleInput(make_tensor((S, S - 1), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S - 2,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),
)
return samples
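# Context sketch (illustrative only, not used by the tests): the canonical source of
# (reflectors, tau) pairs is torch.geqrf, and householder_product then materializes the
# Q factor of the corresponding QR decomposition.
def _sketch_householder_product_usage(device='cpu', dtype=torch.float64):
    a = make_tensor((5, 3), device=device, dtype=dtype)
    reflectors, tau = torch.geqrf(a)
    return torch.linalg.householder_product(reflectors, tau)  # Q with shape (5, 3)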
def sample_inputs_ormqr(op_info, device, dtype, requires_grad, **kwargs):
# create a helper function wrapping `make_tensor`
make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def gen_inputs():
batches = [(), (0, ), (2, ), (2, 1)]
ns = [5, 2, 0]
tf = [True, False]
for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf):
reflectors = make_input((*batch, m, n))
tau = make_input((*batch, min(m, n)))
other_matrix_shape = (m, n) if left else (n, m)
other = make_input((*batch, *other_matrix_shape))
kwargs = {"left": left, "transpose": transpose}
yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs)
return tuple(gen_inputs())
def sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates always positive-definite input for torch.linalg.cholesky using
random_hermitian_pd_matrix.
The input is generated as the itertools.product of 'batches' and 'ns'.
    In total this function generates 16 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 0]
out = []
for batch, n, upper in product(batches, ns, [True, False]):
a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
out.append(SampleInput(a, kwargs={"upper": upper}))
return out
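# Property sketch (illustrative only): for the Hermitian positive-definite inputs generated
# above, the factor returned with upper=False satisfies L @ L.mH == A up to rounding.
def _sketch_cholesky_sample_property(device='cpu', dtype=torch.float64):
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix
    a = random_hermitian_pd_matrix(5, dtype=dtype, device=device)
    chol = torch.linalg.cholesky(a, upper=False)
    return torch.allclose(chol @ chol.mH, a)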
def sample_inputs_symeig(op_info, device, dtype, requires_grad=False, **kwargs):
out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for o in out:
o.kwargs = {"upper": bool(np.random.choice([True, False])),
"eigenvectors": True}
# A gauge-invariant function
o.output_process_fn_grad = lambda output: (output[0], abs(output[1]))
yield o
def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.eig
"""
def out_fn(output):
return output[0], abs(output[1])
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.output_process_fn_grad = out_fn
yield sample
def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument.
"""
def out_fn(output):
if isinstance(output, tuple):
# eigh function
return output[0], abs(output[1])
else:
# eigvalsh function
return output
# Samples do not need to be Hermitian, as we're using gradcheck_wrapper_hermitian_input
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.kwargs = {"UPLO": np.random.choice(["L", "U"])}
sample.output_process_fn_grad = out_fn
yield sample
def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.pinv with hermitian=False keyword argument.
"""
for o in sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs):
real_dtype = o.input.real.dtype if dtype.is_complex else dtype
# requires_grad path for rtol tensor is not implemented
for rtol in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)):
o = clone_sample(o)
o.kwargs = {"rtol": rtol}
yield o
def sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.pinv with hermitian=True keyword argument.
"""
for o in sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs):
o.kwargs = {"hermitian": True}
yield o
def sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs):
"""
This function generates always solvable input for torch.linalg.solve
We sample a fullrank square matrix (i.e. invertible) A
The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.
The second input is generated as the product of 'batches', 'ns' and 'nrhs'.
In total this function generates 18 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices.
'ns' gives 0x0 and 5x5 matrices.
and 'nrhs' controls the number of vectors to solve for:
() - using 1 as the number of vectors implicitly
(1,) - same as () but explicit
(3,) - solve for 3 vectors.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs.
    torch.solve / triangular_solve / cholesky_solve (as opposed to torch.linalg.solve) do not allow
1D tensors (vectors) as the right-hand-side.
Once torch.solve / triangular_solve / cholesky_solve and its testing are removed,
'vector_rhs_allowed' may be removed here as well.
"""
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_a = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad)
make_b = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
batches = [(), (0, ), (2, )]
ns = [5, 0]
if vector_rhs_allowed:
nrhs = [(), (1,), (3,)]
else:
nrhs = [(1,), (3,)]
for n, batch, rhs in product(ns, batches, nrhs):
yield SampleInput(make_a(*batch, n, n), args=(make_b((batch + (n,) + rhs)),))
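# Property sketch (illustrative only): the generated A is full rank, so torch.linalg.solve
# returns an x with A @ x == b up to rounding for every generated right-hand side.
def _sketch_linalg_solve_sample_property(device='cpu', dtype=torch.float64):
    a = make_fullrank_matrices_with_distinct_singular_values(5, 5, dtype=dtype, device=device)
    b = make_tensor((5, 3), dtype=dtype, device=device)
    x = torch.linalg.solve(a, b)
    return torch.allclose(a @ x, b)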
def sample_inputs_linalg_solve_triangular(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
bs = (1, 2, 0)
ns = (3, 0)
ks = (1, 3, 0)
for b, n, k, (left, upper, uni) in product(bs, ns, ks, product((True, False), repeat=3)):
if b == 1:
A = make_arg((n, n)) if left else make_arg((k, k))
B = make_arg((n, k))
else:
A = make_arg((b, n, n)) if left else make_arg((b, k, k))
B = make_arg((b, n, k))
if uni:
# Not really necessary, but writing it for consistency
A.diagonal(0, -2, -1).fill_(1.)
else:
d = A.diagonal(0, -2, -1)
d[d.abs() < 1e-6] = 1.
if upper:
A.triu_()
else:
A.tril_()
kwargs = {"upper": upper, "left": left, "unitriangular": uni}
if requires_grad:
for grad_A, grad_B in product((True, False), repeat=2):
# Either A or B needs to have a gradient
if not grad_A and not grad_B:
continue
yield SampleInput(
A.clone().requires_grad_(grad_A),
args=(B.clone().requires_grad_(grad_B),),
kwargs=kwargs)
else:
yield SampleInput(A, args=(B,), kwargs=kwargs)
def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates always solvable input for legacy solve functions
(the ones that are not in torch.linalg module).
The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation
should have b.ndim >= 2, vectors are not allowed.
Also the arguments order is swapped.
"""
out = sample_inputs_linalg_solve(
op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False
)
def out_fn(output):
return output[0]
# Reverses tensor order
for sample in out:
sample.input, sample.args = sample.args[0], (sample.input,)
if op_info.name == "solve":
sample.output_process_fn_grad = out_fn
yield sample
def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs):
cholesky_inverse_samples = sample_inputs_linalg_cholesky_inverse(
op_info, device, dtype, requires_grad=False
)
for sample in cholesky_inverse_samples:
psd_matrix = sample.input
sample.input = make_tensor(psd_matrix.shape, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None)
sample.args = (psd_matrix.requires_grad_(requires_grad),)
yield sample
def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_fullrank_matrices_with_distinct_singular_values,
dtype=dtype, device=device, requires_grad=requires_grad)
# not needed once OpInfo tests support Iterables
batch_shapes = ((), (3,), (3, 3))
for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)):
shape = batch_shape + (S + size_delta, S)
input = make_arg(*shape)
yield SampleInput(input, args=(True, get_infos))
def sample_inputs_linalg_lu(op_info, device, dtype, requires_grad=False, **kwargs):
full_rank = (op_info.name == "linalg.lu_factor")
make_fn = make_tensor if not full_rank else make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad)
def out_fn(output):
if op_info.name == "linalg.lu":
return output[1], output[2]
else:
return output
batch_shapes = ((), (3,), (3, 3))
    # pivot=False is only supported on CUDA
pivots = (True, False) if torch.device(device).type == "cuda" else (True,)
deltas = (-2, -1, 0, +1, +2)
for batch_shape, pivot, delta in product(batch_shapes, pivots, deltas):
shape = batch_shape + (S + delta, S)
# Insanely annoying that make_fullrank_blablabla accepts a *shape and not a tuple!
A = make_arg(shape) if not full_rank else make_arg(*shape)
yield SampleInput(A, kwargs={"pivot": pivot}, output_process_fn_grad=out_fn)
def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs):
""" Samples the inputs for both linalg.lu_solve and lu_solve """
make_fn = make_fullrank_matrices_with_distinct_singular_values
make_a = partial(make_fn, dtype=dtype, device=device)
make_b = partial(make_tensor, dtype=dtype, device=device)
def clone(X, requires_grad):
Y = X.clone()
Y.requires_grad_(requires_grad)
return Y
is_linalg_lu_solve = (op_info.name == "linalg.lu_solve")
batches = ((), (0, ), (2, ))
ns = (3, 1, 0)
nrhs = (4, 1, 0)
for n, batch, rhs in product(ns, batches, nrhs):
A = make_a(*(batch + (n, n)))
LU, pivots = torch.linalg.lu_factor(A)
B = make_b(batch + (n, rhs))
grads = (False,) if not requires_grad else (True, False)
# we try all possible combinations of requires_grad for each input
for LU_grad, B_grad in product(grads, grads):
# when requires_grad == True, at least one input has to have requires_grad enabled
if requires_grad and not LU_grad and not B_grad:
continue
if is_linalg_lu_solve:
for adjoint, left in product((True, False), repeat=2):
yield SampleInput(clone(LU, LU_grad),
args=(pivots, clone(B if left else B.mT, B_grad)),
kwargs=dict(adjoint=adjoint, left=left))
else:
yield SampleInput(clone(B, B_grad), args=(clone(LU, LU_grad), pivots))
def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):
def out_fn(output):
return output[1], output[2]
for lu_sample in sample_inputs_linalg_lu(op_info, device, dtype, requires_grad, **kwargs):
lu_data, pivots = torch.linalg.lu_factor(lu_sample.input)
lu_data.requires_grad_(requires_grad)
yield SampleInput(lu_data, args=(pivots,), output_process_fn_grad=out_fn)
def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))
for arg in args:
yield SampleInput(make_arg((0, 0, 0)), args=arg)
yield SampleInput(make_arg((S, S, S)), args=arg)
def error_inputs_roll(op_info, device, **kwargs):
err_msg1 = "`shifts` required"
s1 = SampleInput(
make_tensor((S,), dtype=torch.float32, device=device), args=(tuple(),)
)
yield ErrorInput(s1, error_regex=err_msg1)
err_msg2 = ("shifts and dimensions must align")
s2 = SampleInput(
make_tensor((S, S), dtype=torch.float32, device=device), args=((2, 1), 0)
)
yield ErrorInput(s2, error_regex=err_msg2)
err_msg3 = ("out of range")
s3 = SampleInput(
make_tensor((S, ), dtype=torch.float32, device=device), args=(0, 2)
)
yield ErrorInput(s3, error_regex=err_msg3, error_type=IndexError)
def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
args = itertools.product(range(-5, 6), [(0, 1), (1, 2), (1, -1)])
yield SampleInput(make_arg((S, S, S)))
for arg in args:
yield SampleInput(make_arg((S, S, S)), args=arg)
def error_inputs_rot90(op_info, device, **kwargs):
err_msg1 = "expected total rotation dims"
s1 = SampleInput(
make_tensor((S, S), dtype=torch.float32, device=device), kwargs={"dims": (0,)}
)
yield ErrorInput(s1, error_regex=err_msg1)
err_msg2 = "expected total dims >= 2"
s2 = SampleInput(
make_tensor((S,), dtype=torch.float32, device=device),
)
yield ErrorInput(s2, error_regex=err_msg2)
err_msg3 = "expected rotation dims to be different"
s3 = SampleInput(
make_tensor((S, S), dtype=torch.float32, device=device), kwargs={"dims": (1, 1)}
)
yield ErrorInput(s3, error_regex=err_msg3)
def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):
tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype,
requires_grad=requires_grad)
tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype,
requires_grad=requires_grad)
return [
SampleInput(tensor_nd()),
SampleInput(tensor_nd(), kwargs=dict(dim=1)),
SampleInput(tensor_nd(), kwargs=dict(dim=1, unbiased=True, keepdim=True)),
SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=True, keepdim=True)),
SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=False, keepdim=False)),
SampleInput(tensor_nd(), kwargs=dict(dim=(1,), correction=S // 2)),
SampleInput(tensor_nd(), kwargs=dict(dim=None, correction=0, keepdim=True)),
]
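# Hedged illustration (hypothetical helper, not used by the suite) of why the samples above mix
# `unbiased` and `correction`: for var/std, `correction=1` matches `unbiased=True` and
# `correction=0` matches `unbiased=False`.
def _example_var_correction_vs_unbiased():
    x = torch.randn(4, 5, dtype=torch.float64)
    assert torch.allclose(torch.var(x, dim=1, unbiased=True),
                          torch.var(x, dim=1, correction=1))
    assert torch.allclose(torch.var(x, dim=1, unbiased=False),
                          torch.var(x, dim=1, correction=0))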
def _generate_correlation_inputs(device, dtype, requires_grad, **kwargs):
shapes = [(2,), (1, 2), (3, 2), (2, 3)]
for shape in shapes:
yield make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs):
return [SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)]
def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
for t in _generate_correlation_inputs(device, dtype, requires_grad):
inputs.append(SampleInput(t))
num_observations = t.numel() if t.ndimension() < 2 else t.size(1)
fweights = make_tensor((num_observations,), dtype=torch.int, device=device, low=1, high=10)
aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=0, high=1, requires_grad=requires_grad)
for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]):
inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
kwargs={'correction': correction, 'fweights': fw, 'aweights': aw}))
return inputs
def error_inputs_cov(op_info, device, **kwargs):
a = torch.rand(S, device=device)
error_inputs = []
error_inputs.append(ErrorInput(
SampleInput(torch.rand(S, S, S, device=device)),
error_regex="expected input to have two or fewer dimensions"))
error_inputs.append(ErrorInput(
SampleInput(a, kwargs={'fweights': torch.rand(S, S, device=device)}),
error_regex="expected fweights to have one or fewer dimensions"))
error_inputs.append(ErrorInput(
SampleInput(a, kwargs={'aweights': torch.rand(S, S, device=device)}),
error_regex="expected aweights to have one or fewer dimensions"))
error_inputs.append(ErrorInput(
SampleInput(a, kwargs={'fweights': torch.rand(S, device=device)}),
error_regex="expected fweights to have integral dtype"))
error_inputs.append(ErrorInput(
SampleInput(a, kwargs={'aweights': torch.tensor([1, 1], device=device)}),
error_regex="expected aweights to have floating point dtype"))
error_inputs.append(ErrorInput(
SampleInput(a, kwargs={'fweights': torch.tensor([1], device=device)}),
error_regex="expected fweights to have the same numel"))
error_inputs.append(ErrorInput(
SampleInput(a, kwargs={'aweights': torch.rand(1, device=device)}),
error_regex="expected aweights to have the same numel"))
error_inputs.append(ErrorInput(
SampleInput(a, kwargs={'fweights': torch.tensor([-1, -2, -3, -4 , -5], device=device)}),
error_regex="fweights cannot be negative"))
error_inputs.append(ErrorInput(
SampleInput(a, kwargs={'aweights': torch.tensor([-1., -2., -3., -4., -5.], device=device)}),
error_regex="aweights cannot be negative"))
return error_inputs
def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad)
is_linalg_svd = ("linalg.svd" in op_info.name)
batches = [(), (0, ), (3, )]
ns = [0, 3, 5]
def uniformize(usv):
S = usv[1]
k = S.shape[-1]
U = usv[0][..., :k]
Vh = usv[2] if is_linalg_svd else usv[2].mH
Vh = Vh[..., :k, :]
return U, S, Vh
def fn_U(usv):
U, _, _ = uniformize(usv)
return U.abs()
def fn_S(usv):
return uniformize(usv)[1]
def fn_Vh(usv):
# We also return S so that gradients through the singular values are tested as well
_, S, Vh = uniformize(usv)
return S, Vh.abs()
def fn_UVh(usv):
U, S, Vh = uniformize(usv)
return U @ Vh, S
fns = (fn_U, fn_S, fn_Vh, fn_UVh)
fullmat = 'full_matrices' if is_linalg_svd else 'some'
for batch, n, k, fullmat_val, fn in product(batches, ns, ns, (True, False), fns):
shape = batch + (n, k)
yield SampleInput(make_arg(*shape), kwargs={fullmat: fullmat_val}, output_process_fn_grad=fn)
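# Hedged sketch (hypothetical helper, not part of the suite) of what `uniformize` above
# normalizes to: trimming U and Vh to the first k = S.shape[-1] columns/rows gives the reduced
# factorization, which reconstructs the input matrix.
def _example_svd_reduced_reconstruction():
    A = torch.randn(5, 3, dtype=torch.float64)
    U, S, Vh = torch.linalg.svd(A, full_matrices=True)
    k = S.shape[-1]
    U_k, Vh_k = U[..., :k], Vh[..., :k, :]
    assert torch.allclose(U_k @ torch.diag_embed(S) @ Vh_k, A)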
def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = [((1, 2, 3, 4), (0, 2, 3, 1)),
((1, 2, 3, 4), (0, -2, -1, 1)),
((), ()),
((1, 2, 3, 4), (2, 1, 3, 0))]
for shape, args in cases:
yield SampleInput(make_arg(shape), args=(args,))
def reference_inputs_permute(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_permute(op, device, dtype, requires_grad, **kwargs)
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((), ()),
((1,), (0,)),
((2, 2), (1, 0)),
((2, 2), (0, 1)),
((2, 0, 1), (0, 2, 1)),
((3, 4, 2), (2, 1, 0)),
((3, 4, 2), (1, 0, 2)),
((3, 4, 2), (0, 1, 2)),
)
# Adds tricky permutations and permutations with noncontiguity
for shape, permutation in cases:
for p in itertools.permutations(permutation):
a = make_arg(shape).permute(p)
yield SampleInput(a, args=(permutation,))
a = make_arg(shape, noncontiguous=True).permute(p)
yield SampleInput(a, args=(permutation,))
def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 2, 0]
for batch, m, n in product(batches, ns, ns):
yield SampleInput(make_arg(batch + (m, n)))
def error_inputs_softshrink(op, device, **kwargs):
yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device), kwargs={"lambd": -0.5}),
error_regex="lambda must be greater or equal to 0, but found to be -0.5")
def sample_inputs_softshrink(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# The additional sample is to check additional values of lambd beyond the default
# value (what is already checked by sample_inputs_elementwise_unary)
for lbda in (0., 0.5):
yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda})
yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad)
def sample_inputs_hardshrink(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# The additional sample is to check additional values of lambd beyond the default
# value (what is already checked by sample_inputs_elementwise_unary)
# Note that unlike softshrink, lambd is allowed to be negative for hardshrink
for lbda in (-0.5, 0., 0.5):
yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda})
yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad)
def sample_inputs_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# The additional samples check values of min_val and max_val beyond the default
# values (what is already checked by sample_inputs_elementwise_unary)
for max_val, min_val in ((-0.5, 0.5), (0.5, -0.5), (0., 0.)):
yield SampleInput(make_arg(S, S), kwargs={"min_val": min_val, "max_val": max_val})
yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad)
def sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):
eigvecs = make_tensor((S, S), device=device, dtype=dtype,
low=None, high=None)
eigvals = make_tensor((S,), device=device, dtype=dtype,
low=None, high=None)
# we produce only diagonalizable inputs which do not have
# complex eigenvalues for real inputs, as there is no
# backward implementation for real inputs with complex
# eigenvalues yet.
input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()
input.requires_grad_(requires_grad)
def process_output(eigpair):
eigvals, eigvecs = eigpair
if dtype.is_complex:
# eig produces eigenvectors which are normalized to unit norm.
# Note that if v is an eigenvector, so is v * e^{i \phi},
# and |v| = |v * e^{i \phi}| = 1.
# This, however, makes the eigenvector backward computation
# rather unstable unless the objective function is gauge-invariant,
# that is, if f(z) == f(|z|), for example.
# Hence for complex inputs we ignore the phases and return only
# the absolute values.
return eigvals, eigvecs.abs()
else:
return eigvals, eigvecs
return [
SampleInput(
input,
kwargs=dict(eigenvectors=True),
output_process_fn_grad=process_output
),
]
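# Hedged sketch (hypothetical, illustration only) of the gauge-invariance remark in
# `process_output` above: multiplying an eigenvector by a unit-modulus phase leaves its
# absolute value unchanged, which is why only `eigvecs.abs()` is compared for complex inputs.
def _example_eigenvector_phase_invariance():
    import cmath
    v = torch.randn(3, dtype=torch.complex128)
    phase = cmath.exp(1j * 0.3)  # unit-modulus complex scalar
    assert torch.allclose(v.abs(), (v * phase).abs())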
def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs):
def c(t):
return t.clone().requires_grad_(requires_grad)
x = make_tensor((3,), dtype=dtype, device=device, requires_grad=requires_grad)
y = make_tensor((4,), dtype=dtype, device=device, requires_grad=requires_grad)
A = make_tensor((2, 3,), dtype=dtype, device=device, requires_grad=requires_grad)
B = make_tensor((1, 3,), dtype=dtype, device=device, requires_grad=requires_grad)
C = make_tensor((1, 2, 3,), dtype=dtype, device=device, requires_grad=requires_grad)
D = make_tensor((1, 3, 4,), dtype=dtype, device=device, requires_grad=requires_grad)
E = make_tensor((4, 4,), dtype=dtype, device=device, requires_grad=requires_grad)
H = make_tensor((3, 3,), dtype=dtype, device=device, requires_grad=requires_grad)
I = make_tensor((1, 3, 1,), dtype=dtype, device=device, requires_grad=requires_grad)
inputs = []
# Vector operations
inputs.append(SampleInput([c(x)], args=('i->',))) # sum
inputs.append(SampleInput([c(x), c(y)], args=('i,j->ij',))) # outer
# Matrix operations
inputs.append(SampleInput([c(A)], args=("ij->i",))) # col sum
inputs.append(SampleInput([c(A), c(B)], args=("ij,kj->ik",))) # matmul
inputs.append(SampleInput([c(A), c(E)], args=("ij,Ab->ijAb",))) # matrix outer product
# Tensor operations
inputs.append(SampleInput([c(C), c(D)], args=("aij,ajk->aik",))) # batch matmul
inputs.append(SampleInput([c(D), c(E)], args=("aij,jk->aik",))) # tensor matrix contraction
inputs.append(SampleInput([c(C), c(B)], args=("ijk,ik->j",))) # non contiguous
# Test diagonals
inputs.append(SampleInput([c(I)], args=('iji->j',))) # non-contiguous trace
# Test ellipsis
inputs.append(SampleInput([c(H)], args=("i...->...",)))
inputs.append(SampleInput([c(C), c(x)], args=('...ik, ...j -> ij',)))
return inputs
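# Hedged illustration (hypothetical helper, not part of the suite) of one of the einsum
# equations sampled above: "ij,kj->ik" is an ordinary matrix product with the second operand
# transposed.
def _example_einsum_matmul_equivalence():
    A = torch.randn(2, 3, dtype=torch.float64)
    B = torch.randn(4, 3, dtype=torch.float64)
    assert torch.allclose(torch.einsum("ij,kj->ik", A, B), A @ B.T)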
def sample_inputs_linalg_qr_geqrf(op_info, device, dtype, requires_grad=False, **kwargs):
# QR is only well defined when the matrix is full rank
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad)
batches = [(), (0,), (2, ), (1, 1)]
ns = [5, 2, 0]
for batch, (m, n) in product(batches, product(ns, ns)):
shape = batch + (m, n)
yield SampleInput(make_arg(*shape))
def sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((S, M, S), (S, 0, M))
all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())
for size, dims in product(sizes, all_dims):
yield SampleInput(make_arg(size), kwargs={"dims": dims})
def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs):
tensors = (
make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
make_tensor((S, 0, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
)
return [SampleInput(tensor) for tensor in tensors]
def error_inputs_fliplr(op, device, **kwargs):
yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device)),
error_regex="Input must be >= 2-d.")
def error_inputs_flipud(op, device, **kwargs):
yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device)),
error_regex="Input must be >= 1-d.")
# TODO: clamp shares tensors among its sample inputs --- we should prohibit this!
def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):
x = make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
lb = make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
ub = make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
def detach(tensor):
return tensor.clone().detach_().requires_grad_(requires_grad)
return [
SampleInput(detach(x), args=(lb, ub)),
SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),
SampleInput(detach(x), args=(detach(lb[:, :1]),)),
]
def reference_inputs_elementwise_ternary(op, device, dtype, requires_grad, *, sample_inputs_func, supports_scalars=False, **kwargs):
yield from sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_scalar_tensor = partial(make_tensor, (), device='cpu', dtype=dtype, requires_grad=requires_grad)
supported_dtypes = op.supported_dtypes(device)
# broadcasting and noncontiguous cases
cases = (
((4, 4), (4, 4), (4, 4)),
((4, 4), (1, 4, 4), (4, 4)),
((4, 4), (1, 4, 4), (4, 1, 4)),
((4, 4, 1), (1, 4, 4), (4, 4)),
((4, 1), (1, 4, 4), (1, 4)),
((4, 4), (), (4, 4)),
((4, 4), (), ()),
((), (4, 4), (1, 4, 4)),
)
for a, b, c in cases:
yield SampleInput(make_arg(a), args=(make_arg(b), make_arg(c)))
yield SampleInput(make_arg(a, noncontiguous=True),
args=(make_arg(b).transpose(0, -1), make_arg(c, noncontiguous=True).transpose(0, -1)))
# scalar cases
if supports_scalars:
cases = [
((), 1, 2,),
((), 1., 2),
((4, 4), 1., 2,),
((3, 4), make_scalar_tensor(), make_scalar_tensor()),
]
if torch.complex64 in supported_dtypes:
cases.extend([
((3, 1, 4), complex(1, 2), 3.),
])
for a, b, c in cases:
yield SampleInput(make_arg(a), args=(b, c))
# type promotion cases
# int x float
if torch.float in supported_dtypes and torch.long in supported_dtypes:
a = make_arg((), dtype=torch.long)
b = make_arg((1, 4), dtype=torch.float)
c = make_arg((3, 4))
cases = (
(a, b, c),
(c, a, b),
)
for a, b, c in cases:
yield SampleInput(a, args=(b, c))
# NaN propagation
if dtype.is_floating_point or dtype.is_complex:
nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan'))
a = make_arg((12,))
a[4] = nan
a[7] = nan
b = make_arg((12,))
b[1] = nan
b[7] = nan
c = make_arg((12,))
c[9] = nan
yield SampleInput(a, args=(b, c))
def _clamp_min_numpy(a, min=None):
return np.maximum(a, min)
def _clamp_max_numpy(a, max=None):
return np.minimum(a, max)
def _clamp_numpy(a, min=None, max=None):
if min is None:
return np.minimum(a, max)
if max is None:
return np.maximum(a, min)
return np.minimum(max, np.maximum(a, min))
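# Hedged sanity sketch (hypothetical helper, not used by the suite) relating the NumPy
# reference implementations above to torch.clamp on a concrete tensor.
def _example_clamp_reference_check():
    a = torch.randn(4, 4, dtype=torch.float64)
    expected = torch.from_numpy(_clamp_numpy(a.numpy(), min=-0.5, max=0.5))
    assert torch.equal(torch.clamp(a, min=-0.5, max=0.5), expected)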
def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs):
sample0 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),))
sample1 = SampleInput(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),),
kwargs={'dim': 1})
sample2 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),),
kwargs={'dim': -1})
return (sample0, sample1, sample2)
def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs):
def make_arg(shape):
# shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad)
def prod_zeros(dim_select):
assert len(dim_select) == 2
result = make_arg(3 * (S,))
result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()
result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()
result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()
return result
for dim in range(3):
yield SampleInput(make_arg((S, S, S)), args=(dim,))
# Scalar tensors and empty tensor
for size in [(), (1,), (0,)]:
yield SampleInput(make_arg(size), args=(0,))
yield SampleInput(prod_zeros([0, 1]), args=(1,))
yield SampleInput(prod_zeros([0, 2]), args=(1,))
yield SampleInput(prod_zeros([1, 2]), args=(1,))
# test dtype kwarg
yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype})
def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs):
return [SampleInput(make_tensor((S, 2), dtype=dtype, device=device, requires_grad=requires_grad),)]
def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs):
tensors = (
make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad)
)
return [SampleInput(tensor) for tensor in tensors]
def sample_inputs_prod(op_info, device, dtype, requires_grad, **kwargs):
def make_arg(shape):
# shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad)
def prod_single_zero():
result = make_arg(2 * (S,))
result[0, 1] = 0
return result
for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):
# only Tensor, ignore other inputs
yield SampleInput(sample.input.clone().requires_grad_(requires_grad))
yield sample
# Generates samples with keepdim = True
for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):
sample.kwargs['keepdim'] = True
yield sample
yield SampleInput(prod_single_zero())
yield SampleInput(make_arg((3, 3, 3)), args=(1,))
yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True})
# test zero scalar tensor
zero = make_arg(())
zero.zero_()
yield SampleInput(zero.clone().requires_grad_(requires_grad))
yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,))
yield SampleInput(zero.clone().requires_grad_(requires_grad),
args=(0,),
kwargs={'keepdim': True})
def error_inputs_neg(op_info, device, **kwargs):
si = SampleInput(torch.tensor((False, True), device=device))
msg = ("Negation, the `\\-` operator, on a bool tensor is not supported."
" If you are trying to invert a mask, use the `\\~` or"
" `logical_not\\(\\)` operator instead.")
return (ErrorInput(si, error_regex=msg),)
def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):
vec_sample = SampleInput(make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad))
tensors = (
make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
make_tensor((3, 5), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
make_tensor((5, 3), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
)
args = ((), (2,), (-2,), (1,), (2,))
samples = []
for tensor, arg in product(tensors, args):
samples.append(SampleInput(tensor.clone().requires_grad_(requires_grad), args=arg))
return samples + [vec_sample]
def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# Shapes for 2D Tensors
shapes_2d = ((S, S), (3, 5), (5, 3))
# Shapes for 3D Tensors
shapes_3d = ((S, S, S),)
kwargs_2d = (dict(), dict(offset=2), dict(offset=2), dict(offset=1))
kwargs_3d = (dict(offset=1, dim1=1, dim2=2),
dict(offset=2, dim1=0, dim2=1),
dict(offset=-2, dim1=0, dim2=1))
for shape, kwarg in chain(product(shapes_2d, kwargs_2d), product(shapes_3d, kwargs_3d)):
yield SampleInput(make_arg(shape), kwargs=kwarg)
def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# Shapes for 2D Tensors
shapes_2d = ((M, M), (3, 5), (5, 3))
# Shapes for 3D Tensors
shapes_3d = ((M, M, M),)
args_2d = ((), (2,), (-2,), (1,))
args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))
for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):
input_ = make_arg(input_shape)
# We can programmatically figure out the right shape for src:
# It should be the same size as input.diagonal(other_args...)
if not isinstance(arg, tuple):
arg_tuple = (arg,)
else:
arg_tuple = arg
src_shape = input_.diagonal(*arg_tuple).size()
src = make_arg(src_shape)
yield SampleInput(input_, args=(src, *arg_tuple))
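# Hedged sketch (hypothetical helper) of the comment above: the src tensor passed to
# diagonal_scatter must have the same shape as input.diagonal(*same_args), and the scattered
# values can be read back with that same diagonal call.
def _example_diagonal_scatter_src_shape():
    inp = torch.zeros(5, 5)
    src = torch.ones(inp.diagonal(offset=1).shape)
    out = torch.diagonal_scatter(inp, src, offset=1)
    assert torch.equal(out.diagonal(offset=1), src)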
def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return (SampleInput(make_arg((S, S)), args=(), output_process_fn_grad=lambda x: x.to_dense()),
SampleInput(make_arg((S, S)), args=(1,), output_process_fn_grad=lambda x: x.to_dense()),)
def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs):
batch_size, num_classes = shape = (2, 3)
reductions = ("mean", "sum", "none")
input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [
(shape, dict()),
((*shape, 1), dict()),
((*shape, 1, 2), dict()),
((*shape, 1, 2, 3), dict()),
*[(shape, dict(reduction=reduction)) for reduction in reductions],
*[
(
shape,
dict(
weight=make_tensor((num_classes,), device=device, dtype=dtype),
reduction=reduction,
),
)
for reduction in reductions
],
(shape, dict(ignore_index=1)),
]
sample_inputs = []
for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)):
input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad)
if probabilities_target:
# ignore_index is not supported for probabilities target
if "ignore_index" in kwargs:
continue
target = make_tensor(
input_shape,
low=0,
high=1,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
else:
target = make_tensor(
(batch_size, *input_shape[2:]),
low=0,
high=num_classes,
device=device,
dtype=torch.long,
)
if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]):
# make sure at least one item in target is not ignored
target[0] = random.sample(set(range(num_classes)) - {kwargs["ignore_index"]}, 1)[0]
sample_inputs.append(SampleInput(input, args=(target,), kwargs=kwargs))
return sample_inputs
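# Hedged illustration (hypothetical helper, not part of the suite) of the two target styles
# sampled above: cross_entropy accepts either integer class indices or floating-point class
# probabilities of the same shape as the logits.
def _example_cross_entropy_targets():
    logits = torch.randn(2, 3)
    class_target = torch.tensor([0, 2])                    # class indices
    prob_target = torch.softmax(torch.randn(2, 3), dim=1)  # class probabilities
    l1 = torch.nn.functional.cross_entropy(logits, class_target)
    l2 = torch.nn.functional.cross_entropy(logits, prob_target)
    assert l1.ndim == 0 and l2.ndim == 0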
# Used for log_softmax, softmax, softmin
def sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = [
((S, ), (0, )),
((S, S), (0, )),
((S, S), (1, )),
((S, S), (-1, )),
((S, M, S), (2, )),
]
# PyTorch on XLA throws an error when passed with dim argument for 0d tensor.
# See https://github.com/pytorch/xla/issues/3061 for more details.
if torch.device(device).type != 'xla':
cases.append(((), (0, )))
return [
SampleInput(make_arg(shape), args=dim, kwargs=dict(dtype=torch.float64) if with_dtype else None)
for shape, dim in cases
]
def sample_inputs_masked_softmax(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):
"""Sample inputs for masked softmax, log_softmax, and softmin.
A masked normalization operator is a reduction operator with an
optional trailing mask argument. The mask is a bool tensor with the
same shape as the input, or a shape that is broadcastable to the
input shape.
"""
inputs: List[SampleInput] = []
for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs):
for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):
sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)
inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
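# Hedged illustration (hypothetical helper, not part of the suite) of the docstring above:
# a mask only needs to be broadcastable to the input shape, not equal to it.
def _example_broadcastable_mask():
    inp = torch.randn(2, 3, 4)
    mask = torch.tensor([True, False, True, True])  # shape (4,) broadcasts to (2, 3, 4)
    assert torch.broadcast_to(mask, inp.shape).shape == inp.shape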
def sample_inputs_masked_cumops(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked cumsum and cumprod.
"""
inputs: List[SampleInput] = []
for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs):
for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):
if type(mask) != torch.Tensor:
continue
sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)
if 'keepdim' in sample_input_kwargs:
sample_input_kwargs.pop('keepdim')
# dimension is required
if sample_input_args:
dim = sample_input.args[0]
else:
if 'dim' not in sample_input_kwargs:
continue
dim = sample_input_kwargs.pop('dim')
sample_input_args = (dim,)
inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
def sample_inputs_masked_logaddexp(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked logaddexp.
"""
inputs: List[SampleInput] = []
shapes = [(S,), (S, S), (S, M, S)]
input_mask_lists = [list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes]
other_mask_lists = [list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes]
for shape, input_masks, other_masks in zip(shapes, input_mask_lists, other_mask_lists):
for input_mask, other_mask in zip(input_masks, other_masks):
input = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
other = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
inputs.append(SampleInput(input.clone().requires_grad_(requires_grad),
args=(other.clone().requires_grad_(requires_grad),),
kwargs=dict(input_mask=input_mask, other_mask=other_mask)))
return inputs
def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked normalize.
"""
inputs: List[SampleInput] = []
for ord in [2.0, 1, float('inf'), float('-inf'), 0]:
for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs):
sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()
inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs):
low, high = op_info.domain
# Note: the operator is very sensitive at points near the
# start and end of its domain and produces NaN for float16
# if domain_eps is 1e-5.
domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2
low = low + domain_eps
high = high - domain_eps
samples = (
SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)),
SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=low,
high=high, requires_grad=requires_grad), args=(0.2,)),
SampleInput(make_tensor((), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)),
SampleInput(make_tensor((), dtype=dtype, device=device, low=low,
high=high, requires_grad=requires_grad), args=(0.2,)),
)
return samples
def sample_inputs_isin(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# isin has two paths based on the size of elements and test_elements.
# if elements.numel() < 10 * pow(test_elements.numel(), 0.145):
yield SampleInput(make_arg((L,)), args=(make_arg((S,)),))
# else:
yield SampleInput(make_arg((S,)), args=(make_arg((L,)),))
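# Hedged sketch (hypothetical helper) of the size heuristic referenced in the comments above:
# which internal path isin takes depends on how elements.numel() compares to
# 10 * test_elements.numel() ** 0.145.
def _example_isin_size_heuristic():
    elements = torch.arange(20)
    test_elements = torch.arange(10000)
    threshold = 10 * test_elements.numel() ** 0.145  # roughly 38 for 10_000 test elements
    assert elements.numel() < threshold  # this pair matches the `if` branch in the comment above
    assert torch.isin(elements, test_elements).all()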
def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))))
yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S))))
yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S))))
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))),
broadcasts_input=True)
def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(())))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(())))
yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, make_arg(())),
broadcasts_input=True)
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, 10),
broadcasts_input=True)
if torch.device(device).type == 'cuda':
# `self` and `mask` on CUDA but `value` is a CPU scalar tensor.
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, torch.randn(())))
def error_inputs_masked_fill(op_info, device, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False)
# `value` is not a 0-D tensor.
yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, make_arg((1,)))),
error_regex="only supports a 0-dimensional value tensor, but got tensor with 1 dimension")
# downcasting complex value (scalar overload)
yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, 1j)),
error_regex=r"value cannot be converted to type .* without overflow")
# downcasting complex value (tensor overload)
yield ErrorInput(SampleInput(torch.ones(2, dtype=torch.long, device=device),
args=(make_arg(()) > 0, torch.tensor(1j, device=device))),
error_regex=r"value cannot be converted to type .* without overflow")
if torch.device(device).type == 'cuda':
# `self` and `mask` on CPU but `value` is a CUDA scalar tensor.
yield ErrorInput(SampleInput(torch.randn((S, S), device='cpu'),
args=(torch.randn(S, S, device='cpu') > 0,
torch.randn((), device='cuda'))),
error_regex=r"to be on same device")
def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs):
samples = (
SampleInput(make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn(M, M, device=device) > 0,)),
SampleInput(make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M,), device=device) > 0,)),
SampleInput(make_tensor((M,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
SampleInput(make_tensor((M, 1, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
SampleInput(make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(torch.tensor(1, device=device, dtype=torch.bool),)),
SampleInput(make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(torch.tensor(1, device=device, dtype=torch.bool),)),
SampleInput(make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
)
return samples
def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs):
samples = (
SampleInput(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)),
SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, requires_grad=requires_grad)),
)
return samples
def sample_inputs_matmul(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (((L,), (L,)),
((S, M), (M,)),
((M,), (M, S)),
((S, M), (M, S)),
((S, 0), (0, M)),
((S, S, M), (M,)),
((S, S, M), (M, S)),
((S, S, 0), (0, S)),
((M,), (S, M, S)),
((S, M), (S, M, S)),
((0, 0), (S, 0, 0)),
((S, S, M, M), (S, S, M, S)),
((S, S, M, M), (M,)),
((M,), (S, S, M, S)))
sample_inputs = []
for lhs_shape, rhs_shape in test_cases:
lhs = make_tensor(lhs_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
rhs = make_tensor(rhs_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
if op_info.name == 'matmul':
sample_inputs.append(SampleInput(lhs, args=(rhs,)))
elif op_info.name == '__rmatmul__':
sample_inputs.append(SampleInput(rhs, args=(lhs,)))
else:
raise RuntimeError("`op_info.name` must be 'matmul' or '__rmatmul__'")
return tuple(sample_inputs)
def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype,
requires_grad: bool,
*, variant: str, **kwargs) -> List[SampleInput]:
if variant == 'variadic':
def make_inputs(
tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
List[torch.Tensor]],
Tuple[torch.Tensor, ...]]:
return tensors[0], tuple(tensors[1:])
elif variant == 'list':
def make_inputs(
tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
List[torch.Tensor]],
Tuple[torch.Tensor, ...]]:
return tensors, ()
else:
raise ValueError(
'Unsupported variant, must be one of {"variadic", "list"}. '
f'Got "{variant}".')
SCALAR = torch.Size([])
VECTOR = torch.Size([3])
test_cases: List[List[torch.Size]] = [
[SCALAR],
[VECTOR],
[VECTOR, SCALAR],
[VECTOR, SCALAR, VECTOR],
[VECTOR, SCALAR, VECTOR, SCALAR],
]
sample_inputs = []
for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}):
input, args = make_inputs(
[make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
for shape in shapes])
sample_inputs.append(SampleInput(input=input, args=args,
kwargs=dict(indexing=indexing)))
return sample_inputs
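# Hedged illustration (hypothetical helper, not part of the suite) of the two `indexing` modes
# sampled above: 'ij' keeps the input order in the output shape, while 'xy' swaps the first two
# dimensions, matching NumPy's default meshgrid behavior.
def _example_meshgrid_indexing():
    x, y = torch.arange(2), torch.arange(3)
    gi, _ = torch.meshgrid(x, y, indexing='ij')
    gx, _ = torch.meshgrid(x, y, indexing='xy')
    assert gi.shape == (2, 3) and gx.shape == (3, 2)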
def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
tensor_shapes = ((S, S), ())
ns = (1, 2, 3, 4, 5)
for shape, n in product(tensor_shapes, ns):
yield SampleInput(make_arg(shape), args=(n,))
def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
tensor_shapes = ((S, S), ())
ns = (1, 2, 3, 4, 5)
# Since the accepted lower bound for the input
# to mvlgamma depends on the `p` argument,
# the following function computes the lower bound
# that we pass to `make_tensor`.
def compute_min_val(p):
return (p - 1.) / 2
for shape, n in product(tensor_shapes, ns):
min_val = compute_min_val(n)
if not dtype.is_floating_point:
# Round-up minimum value for integral dtypes
min_val += 1
else:
min_val += 2 * torch.finfo(dtype).eps
yield SampleInput(make_arg(shape, low=min_val), args=(n,))
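# Hedged sketch (hypothetical helper) of the lower-bound computation above: mvlgamma(x, p)
# requires every element of x to be strictly greater than (p - 1) / 2.
def _example_mvlgamma_domain():
    p = 3
    min_val = (p - 1.) / 2  # = 1.0
    x = torch.full((4,), min_val + 0.1, dtype=torch.float64)
    assert torch.isfinite(torch.mvlgamma(x, p)).all()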
# Since `mvlgamma` has multiple OpInfo entries,
# there are multiple common skips for the additional
# entries. The following function is a helper to that end.
def skips_mvlgamma(skip_redundant=False):
skips = (
# values outside the domain are a hard error for the mvlgamma op.
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'),
)
if skip_redundant:
# Redundant tests
skips = skips + ( # type: ignore[assignment]
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
)
return skips
# To test reference numerics against multiple values of argument `p`,
# we make multiple OpInfo entries with each entry corresponding to different value of p.
# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.
# Class `MvlGammaInfo` already contains the basic information related to the operator,
# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which
# differ between the entries.
class MvlGammaInfo(UnaryUfuncInfo):
def __init__(self, variant_test_name, domain, skips, sample_kwargs):
super(MvlGammaInfo, self).__init__(
'mvlgamma',
ref=reference_mvlgamma if TEST_SCIPY else None,
aliases=('special.multigammaln',),
variant_test_name=variant_test_name,
domain=domain,
decorators=(precisionOverride({torch.float16: 5e-2}),),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half),
sample_inputs_func=sample_inputs_mvlgamma,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=skips,
sample_kwargs=sample_kwargs)
def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):
low, _ = op_info.domain
if requires_grad:
low = 0 + op_info._domain_eps
return (SampleInput(make_tensor((L,), dtype=dtype, device=device,
low=low,
requires_grad=requires_grad)),
SampleInput(make_tensor((), dtype=dtype, device=device,
low=low,
requires_grad=requires_grad)))
# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`
# supports an `exclude` argument.
# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617
def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):
samples = (SampleInput(make_tensor((S,), dtype=dtype, device=device,
requires_grad=requires_grad)),
SampleInput(make_tensor((), dtype=dtype, device=device,
requires_grad=requires_grad)))
if requires_grad and op_info.op == torch.special.i0e:
# NOTE: `i0e`'s first-order gradient is not continuous
# at `0`, hence we don't test `i0e` with any input being `0`.
# TODO: Remove this when `make_tensor` supports excluding `0`.
for sample in samples:
t = sample.input
t[t == 0] = torch.finfo(dtype).eps # type: ignore[index]
elif requires_grad and op_info.op != torch.special.i0e:
# Special Case for gradient
# Sample with `0` in the input
t = make_tensor((S,), dtype=dtype, device=device,
requires_grad=requires_grad)
t[0] = 0
samples += (SampleInput(t),) # type: ignore[assignment]
return samples
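# Hedged sketch (hypothetical helper) of the zero-avoidance trick used above for i0e:
# zeros are replaced with the dtype's eps so the gradient check does not sit exactly on the
# point where the first derivative is discontinuous.
def _example_i0e_avoid_zero():
    t = torch.tensor([-1.0, -0.5, 0.0, 0.5, 1.0], dtype=torch.float64)
    t[t == 0] = torch.finfo(t.dtype).eps
    assert (t != 0).all()
    assert torch.isfinite(torch.special.i0e(t)).all()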
def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)
samples = [
SampleInput(_make_tensor_helper((S, S, S)), args=(0,)),
SampleInput(_make_tensor_helper((S, S, S)), args=(1,)),
SampleInput(_make_tensor_helper(()), args=(0,)),
]
if supports_dtype_kwargs:
# NOTE: if `dtype` is not the same as the input's dtype, then inplace variants fail with
# `provided dtype must match the dtype of self tensor in cumsum`
samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype}))
return samples
def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((), (0, 1, 1)),
((S, S, S, S), (0, 3, 1)),
((S, S, S, S), (1, 3, 1)),
((S, S, S, S), (2, 3, 1)),
((S, S, S, S), (3, 3, 1)),
((S, S, S, S), (0, 3, 2)),
((S, S, S, S), (1, 3, 2)),
((S, S, S, S), (2, 3, 2)),
((S, S, S, S), (3, 3, 2)),
((S, S, S, S), (0, 4, 1)),
((S, S, S, S), (1, 4, 1)),
((S, S, S, S), (2, 4, 1)),
((S, S, S, S), (3, 4, 1)),
((M,), (0, 3, 1)),
((M,), (0, 3, 2)),
((M,), (0, 3, 3)),
((1000,), (0, 3, 11)),
((1000,), (0, 2, 27)),
((10, 10), (0, 1, 2)),
((10, 10), (1, 2, 3)),
((10, 10), (1, 2, 2)),
((S, S, S), (2, 3, 2)),
)
sample_inputs = []
for shape, arguments in test_cases:
sample_inputs += [SampleInput(make_tensor(shape, dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad),
args=arguments)]
return sample_inputs
def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
if list_args:
cases = (
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),
((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),),
((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], -2),)
)
else:
cases = ( # type: ignore[assignment]
((S, S, S), (2,)),
((S, S, S), (S, 1)),
)
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),
((S, S, S), ([int(S / 3), S - int(S / 3), 0],)),
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], 2)),
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], -2)),
)
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
def sample_inputs_msort(op_info, device, dtype, requires_grad, **kwargs):
def apply_grad(t):
if dtype in floating_types_and(torch.float16, torch.bfloat16):
t.requires_grad_(requires_grad)
def large_1d_unique(dtype, device):
res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
res = res.to(dtype)
apply_grad(res)
return res
samples = []
# Test case for large tensor.
largesample = SampleInput(large_1d_unique(dtype, device))
sample = SampleInput(make_tensor((S, M, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad))
return [largesample, sample]
def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
samples = (
# no broadcast
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)),
# broadcast rhs
SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)),
# scalar tensor
SampleInput(make_arg(()), args=(make_arg(()), 0.4)),
# broadcast rhs scalar-tensor
SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)),
# broadcast rhs with weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))),
# broadcast rhs and weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))),
# broadcast lhs
SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# scalar broadcast_lhs
SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# broadcast all
SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# tensor broadcast all
SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))),
broadcasts_input=True),
# no broadcast with weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), make_arg((S, S)))),
# broadcast lhs with weight tensor
SampleInput(make_arg((S,)), args=(make_arg((S, S)), make_arg((S, S))), broadcasts_input=True),
# broadcast lhs and weight tensor
SampleInput(make_arg((S,)), args=(make_arg((S, S, S)), make_arg((S, S))), broadcasts_input=True),
# broadcast lhs and weight tensor variant
SampleInput(make_arg((S, S)), args=(make_arg((S, S, S)), make_arg((S,))), broadcasts_input=True),
)
if dtype.is_complex:
samples = samples + ( # type: ignore[assignment]
# no broadcast
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)),
# broadcast rhs
SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)),
# scalar tensor
SampleInput(make_arg(()), args=(make_arg(()), 0.4j)),
SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)),
# broadcast rhs scalar-tensor
SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)),
)
return samples
def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs):
cases = (
((2, 2, 2), (2, 2, 2), (2)),
((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])),
)
samples = []
for first_shape, second_shape, dims in cases:
samples.append(SampleInput(make_tensor(first_shape, dtype=dtype, device=device,
requires_grad=requires_grad),
args=(make_tensor(second_shape, dtype=dtype, device=device,
requires_grad=requires_grad),),
kwargs=dict(dims=dims,)))
return tuple(samples)
def sample_inputs_kron(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((S, S), (M, L)),
)
sample_inputs = []
for input_shape, other_shape in test_cases:
input = make_tensor(input_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
other = make_tensor(other_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
sample = SampleInput(input, args=(other,))
sample_inputs.append(sample)
return tuple(sample_inputs)
def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, ), dtype=dtype, device=device, requires_grad=requires_grad),
args=(
make_tensor((S, ), dtype=dtype, device=device, requires_grad=requires_grad),
)
),
SampleInput(
make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad),
args=(
make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
)
),
)
def sample_inputs_scatter(op_info, device, dtype, requires_grad, **kwargs):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)
def _gather(shape, index_dim, max_indices):
return gather_variable(shape, index_dim, max_indices, device=device)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
(_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),
(_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),
(_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor(()), (0, zero.clone().detach(), _tensor(()))),
(_tensor(()), (0, zero.clone().detach(), 2.5)),
)
samples = []
for tensor, args in test_cases:
samples.append(SampleInput(tensor, args=args))
if not requires_grad:
samples.append(SampleInput(
tensor.clone().detach(),
args=args, kwargs={'reduce': 'add'}
))
if dtype.is_floating_point:
samples.append(SampleInput(
tensor.clone().detach(),
args=args, kwargs={'reduce': 'multiply'}
))
return samples
def sample_inputs_scatter_add(op_info, device, dtype, requires_grad, **kwargs):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)
def _gather(shape, index_dim, max_indices):
return gather_variable(shape, index_dim, max_indices, device=device)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
(_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),
(_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),
(_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor(()), (0, zero.clone().detach(), _tensor(()))),
)
return [SampleInput(tensor, args=args) for tensor, args in test_cases]
def sample_inputs_scatter_reduce(op_info, device, dtype, requires_grad, **kwargs):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)
def _gather(shape, index_dim, max_indices):
return gather_variable(shape, index_dim, max_indices, device=device)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
((M, S), 0, _gather((S, S), 1, M), (S, S)),
((M, S), 1, _gather((S, S), 0, S), (S, S)),
((M, S), -1, _gather((S, S), 0, S), (S, S)),
((M, S), 0, _gather((M, S // 2), 1, M), (M, S // 2)),
((M, S), 1, _gather((M, S // 2), 0, S), (M, S // 2)),
((M, S), -1, _gather((M, S // 2), 0, S), (M, S // 2)),
((), 0, zero.clone().detach(), ()),
)
reduce = op_info.variant_test_name
for args, include_self in product(test_cases, [True, False]):
inp_shape, dim, index, src_shape = args
yield SampleInput(_tensor(inp_shape),
args=(dim, index, _tensor(src_shape), reduce),
kwargs={'include_self': include_self})
# Sample inputs to test edge cases for backward
# Check that gradients are propagated correctly for prod when zeros in self/src are reduced
if requires_grad and reduce == 'prod':
# This sample tests gradients for the following cases
# (a) 1 zero reduced (from src (self[0, 1], self[1, 1]), from self (self[0, 0], self[2, 0]))
# (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0]))
# (c) no zeros reduced (self[2, 1])
# (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py
# test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad
input = torch.tensor([[0, 13], [0, 17], [0, 19]], dtype=dtype, device=device, requires_grad=requires_grad)
src = torch.tensor([[0, 1, 2, 3], [0, 4, 0, 1], [2, 3, 5, 6]], dtype=dtype, device=device, requires_grad=requires_grad)
idx = torch.tensor([[1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dtype=torch.long, device=device)
yield SampleInput(input,
args=(1, idx, src, reduce),
kwargs={'include_self': True})
def sample_inputs_segment_reduce(op_info, device, dtype, requires_grad, *, mode='lengths', **kwargs):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
# inp_shape, dim, lengths, unsafe
((S,), 0, [0, 1, 2, 2], False),
((S,), 0, [0, 1, 2, 2], True),
((S,), 0, [2, 0, 3, 0], False),
((S, S), 0, [0, 1, 2, 2], False),
# test when lengths do not sum to dim size
((M, S, S), 0, [1, 2, 0, 6, 0], True),
# test for higher dimensions
((S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False),
((S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False),
((S, S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False),
((S, S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False),
)
reductions = ["max", "mean", "min", "sum", "prod"]
for args, reduce, initial in product(test_cases, reductions, [1, 2]):
inp_shape, dim, lengths, unsafe = args
lengths_t = torch.tensor(lengths, dtype=torch.long, device=device)
sample_input_kwargs = {'axis': dim, 'unsafe': unsafe, 'initial': initial}
if mode == 'lengths':
sample_input_kwargs['lengths'] = lengths_t
elif mode == 'offsets':
zeros_shape = list(lengths_t.shape)
zeros_shape[dim] = 1
offsets_t = torch.cat((lengths_t.new_zeros(zeros_shape), lengths_t), dim).cumsum_(dim)
sample_input_kwargs['offsets'] = offsets_t
else:
raise RuntimeError(f"mode most be one of 'offsets' or 'lengths' got '{mode}'.")
yield SampleInput(_tensor(inp_shape),
args=(reduce,),
kwargs=sample_input_kwargs)
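# Hedged sketch (hypothetical helper) of the lengths-to-offsets conversion used in the
# 'offsets' mode above: prepend a zero and take the cumulative sum along the segmented
# dimension.
def _example_lengths_to_offsets():
    lengths = torch.tensor([0, 1, 2, 2])
    offsets = torch.cat((lengths.new_zeros(1), lengths)).cumsum_(0)
    assert offsets.tolist() == [0, 0, 1, 3, 5]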
def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):
samples = (SampleInput(make_tensor((S, S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad)),
SampleInput(make_tensor((), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad)),
SampleInput(make_tensor((S, S, S), dtype=dtype, device=device,
low=None, high=None,
requires_grad=requires_grad, noncontiguous=True)),)
return samples
def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((M, M), ()),
((M, M), (2,),),
((S, M, M), ()),
((S, M, M), (2,)),
((3, 3, S, S), ()),)
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
def sample_inputs_clone_contiguous(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(make_arg((S, M, S)))
yield SampleInput(make_arg(()))
def reference_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs):
# NOTE: the default memory format for clone is torch.preserve_format, for contiguous it's torch.contiguous_format
# This exploits that default to test torch.preserve_format for clone, without causing an error when testing contiguous
yield from sample_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs)
shapes = (
(3, 5, 6),
(1, 1, 3, 5, 6),
(1, 1, 3, 5, 6, 1, 1),
(1, 0, 3, 5, 0, 2),
(1, 0, 3, 5, 0, 0, 1, 1, 2),
(),
)
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
for shape in shapes:
yield SampleInput(make_arg(shape))
yield SampleInput(make_arg(shape).transpose(0, -1))
yield SampleInput(make_arg(shape, noncontiguous=True))
yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1))
yield SampleInput(make_arg(shape), kwargs={'memory_format': torch.contiguous_format})
yield SampleInput(make_arg(shape).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format})
yield SampleInput(make_arg(shape, noncontiguous=True), kwargs={'memory_format': torch.contiguous_format})
yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format})
# shape, strides, offset
strided_cases = (
((5, 6, 2), (1, 1, 7), 2),
((5, 5, 4), (1, 1, 7), 2),
((5, 5, 2), (4, 5, 7), 3),
((5, 5, 2), (5, 5, 7), 3),
((5, 5, 2), (5, 5, 5), 3),
((9, 5, 2), (0, 1, 7), 3),
)
for shape, strides, offset in strided_cases:
yield SampleInput(make_arg(500,).as_strided(shape, strides, offset))
yield SampleInput(make_arg(500,).as_strided(shape, strides, offset), kwargs={'memory_format': torch.contiguous_format})
# channels last 2D
yield SampleInput(make_arg((2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last})
a = make_arg((2, 2, 2, 2)).permute(0, 3, 1, 2)
yield SampleInput(a, kwargs={'memory_format': torch.channels_last})
# channels last 3D
yield SampleInput(make_arg((2, 2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last_3d})
a = make_arg((2, 2, 2, 2, 2)).permute(0, 4, 1, 2, 3)
yield SampleInput(a, kwargs={'memory_format': torch.channels_last_3d})
def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# list of tuples (shape, shape) defining the shapes of the input and output tensors
sample_shapes = [
((), ()),
((S), (1)),
((S, S), (1, 1)),
((S, S), (1, S)),
((S, S), (S, S)),
((S, S, S), (S, 1, S)),
]
samples = []
for input_shape, output_shape in sample_shapes:
input_t = make_arg(input_shape)
samples.append(SampleInput(input_t, args=(output_shape,)))
return samples
def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (S * S, S)),
((), ()),
((), (1, 1, 1)),
)
for shape, args_or_shape in cases:
# Update `args` based on operator
if op_info.name == 'resize_':
# resize_ takes shape/tuple of ints,
args = (args_or_shape, )
elif op_info.name == 'resize_as_':
# resize_as_ takes another tensor
args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment]
else:
raise ValueError("sample_inputs_resize_ops is being used with incorrect operator")
yield SampleInput(make_arg(shape, requires_grad=requires_grad), args=args)
def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (
((S, S, S), (S * S, S)),
((S * S, S), (S, S, S)),
((S * S, S), (S, -1, S)),
((S * S * 2, S), (S, -1)),
((S,), (S,)),
((), ()),
((), (1,)),
)
for shape, args in cases:
yield SampleInput(make_arg(shape), args=(args,))
if kwargs.get("transpose_samples", False) and len(shape) >= 2:
transposed = make_arg(shape).transpose(0, 1).detach().requires_grad_(requires_grad)
yield SampleInput(transposed, args=(args,))
def reference_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs)
cases = (
((125,), (25, 5)),
((25, 25), (1, 5, 5, 1, 5, 1, 5, 1)),
((16, 32), (2, 4, 1, 4, 4, 1, 4)),
((16, 12), (12, 16)),
((1, 16, 12), (12, 16)),
((1, 5, 1, 5), (25, 1)),
((2, 4, 2), (4, 4)),
((1, 4), (1, 1, 2, 1, 2)),
((3, 5, 7), (7, 5, 3)),
((1,), ()),
((5, 0, 2, 3), (5, 0, 2, 3)),
((2, 1, 0, 3, 1), (5, 0)),
((1,), ()),
((4, 5, 6), (4, 5, 6, 1, 1, 1)),
((), (1, 1, 1, 1)),
)
irreversible_cases = (
((), (-1,)),
((4, 7, 9, 1, 1), (1, 4, 3, -1, 1)),
)
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
for a, b in cases:
yield SampleInput(make_arg(a), args=(b,))
yield SampleInput(make_arg(b), args=(a,))
if kwargs.get("transpose_samples", False):
yield SampleInput(make_arg(a, noncontiguous=True).transpose(0, -1), args=(b,))
else:
yield SampleInput(make_arg(a, noncontiguous=True), args=(b,))
for a, b in irreversible_cases:
yield SampleInput(make_arg(a), args=(b,))
def error_inputs_reshape(op, device, **kwargs):
cases = (
# Reshape to different numel
((2,), ()),
((1, 3, 0), ()),
((4, 3), (4, 2)),
((1, 3, 5), (5, 2, 2)),
# No valid inference
((1, 3, 5), (5, -1, 2)),
# Two inferred shapes
((1, 3, 5), (5, -1, -1)),
((1), (0, -1)),
((0, 5), (0, -1)),
)
make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False)
for a, b in cases:
yield ErrorInput(SampleInput(make_arg(a), args=(b,)), error_type=Exception, error_regex="")
def sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (S * S, S)),
((), ()),
((), (1, 1)),
)
for case in cases:
shape, shape_other = case
inp = make_arg(shape, requires_grad=requires_grad)
yield(SampleInput(inp, args=(make_arg(shape_other, requires_grad=False),)))
if op_info.name != "view_as" and len(shape) >= 2:
yield(SampleInput(
inp.clone().transpose(0, 1).requires_grad_(requires_grad),
args=(make_arg(shape_other, requires_grad=False),)))
def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs):
input_list = []
shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),)
make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
samples = []
for shape in shapes:
input_list.append(make_tensor_partial(shape))
samples.append(SampleInput(make_tensor_partial(shape)))
samples.append(SampleInput(input_list, ))
return samples
def sample_inputs_column_stack(op_info, device, dtype, requires_grad, **kwargs):
input_list = []
cases: Tuple[tuple, tuple] = ( # type: ignore[assignment]
((S, 2, 1), (S, 3, 1)),
((S), (S, 5)), ((), (1, S))
)
make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
for shape1, shape2 in cases:
input_list.append(SampleInput([make_tensor_partial(shape1), make_tensor_partial(shape2)]))
return input_list
def sample_inputs_flatten(op_info, device, dtype, requires_grad, **kwargs):
samples = []
shapes = ((S, S, S), (S, S), (S, ), (),)
make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
for shape in shapes:
samples.append(SampleInput(make_tensor_partial(shape)))
if len(shape) > 1:
samples.append(SampleInput(make_tensor_partial(shape), kwargs=dict(start_dim=1, end_dim=-1)))
return samples
def reference_inputs_flatten(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_flatten(op, device, dtype, requires_grad, **kwargs)
# shape x start_dim x end_dim
cases = (
((5, 4, 0, 1, 3, 7), 1, 3),
((5, 4, 0, 1, 3, 7), 4, 5),
((5, 4, 1, 1, 3, 7), 2, 3),
((), 0, -1),
((1,), 0, -1),
((3, 7, 5), 1, 2),
((4, 5), 1, 1),
((1, 5, 5, 1, 5, 1, 5, 1), 0, 2),
((1, 5, 5, 1, 5, 1, 5, 1), 3, -1),
((1, 5, 5, 1, 5, 7, 5, 1), -2, -1),
((2, 4, 2), 0, 1),
((4, 2, 2), 1, 2),
((0, 3, 4, 5), 1, 3),
)
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
for shape, start, end in cases:
yield SampleInput(make_arg(shape), args=(start, end,))
yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), args=(start, end,))
yield SampleInput(make_arg(shape).transpose(0, -1), args=(start, end,))
def sample_inputs_unflatten(op_info, device, dtype, requires_grad, **kwargs):
# in_shape, dim, sizes
args = (((8,), 0, (8,)),
((8,), 0, (4, 2)),
((8,), -1, (2, 2, 2)),
((8,), -1, (-1, 2)),
((3, 6, 2), 1, (2, 3)),
((3, 6, 2), -2, (2, 3)),
((3, 6, 2), -2, (-1, 3)),
((3, 2, 12), 2, (3, 2, 2)),
((4, 0), 0, (2, 2)),
((4, 0), 1, (2, 0, 0, 0)),
)
make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
for in_shape, dim, sizes in args:
yield SampleInput(make_tensor_partial(in_shape), args=(dim, sizes))
def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (1, 2)),
((S, S, S), (-1, 2)),
((S, S, S), (-1, -1)),
((S, S, S), (1, -1)),
((S,), (0, 2))
)
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
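    # Each case is (input_shape, src_shape, (dim, index)), matching the
    # torch.select_scatter(input, src, dim, index) signature.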
cases = (((S, S, S), (S, S), (1, 2)),
((S, S, S), (S, S), (-1, 2)),
((S, S, S), (S, S), (-1, -1)),
((S, S, S), (S, S), (1, -1)),
((S,), (), (0, 2))
)
for input_shape, src_shape, args in cases:
input_ = make_arg(input_shape)
src = make_arg(src_shape)
yield SampleInput(input_, args=(src, *args))
def sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
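    # Each case is (input_shape, src_shape, (dim, start, end, step)), matching the
    # torch.slice_scatter(input, src, dim, start, end, step) signature.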
cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)),
((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)),
((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)),
((L, L, L), (L, L, L,), (1, 0, L, 1)),
((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)),
((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)),
((L, L, L), (L, L, L,), (2, 0, L, 1)),
((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)),
((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)),
)
for input_shape, src_shape, args in cases:
input_ = make_arg(input_shape)
src = make_arg(src_shape)
yield SampleInput(input_, args=(src, *args))
def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, 1, 1), (S, S, S)),
((S, 1, S), (S, S, S)),
((S, 1, S), (-1, S, -1)),
((S, 1, S), (-1, S, S)),
((S, 1), (S, S, S)),
((1,), (S, S, S)),
((1, S), (1, 1, S)),
((), ()),
((), (1, 3, 2)),
)
for case in cases:
shape, args = case
yield(SampleInput(make_arg(shape), args=(args, )))
def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((),
(2, 3))
memory_format_options = [None, torch.contiguous_format]
for shape, memory_format in itertools.product(shapes, memory_format_options):
yield SampleInput(make_arg(shape),
kwargs={'memory_format': memory_format} if memory_format else {})
yield SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last})
def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, 1, 1), (S, S, S)),
((), ()),
((), (1, 1)),
)
for shape, shape_other in cases:
yield(SampleInput(make_arg(shape, requires_grad=requires_grad),
args=(make_arg(shape_other, requires_grad=False), )))
def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def make_bool_mask(shape):
        # Make sure at least one element is True,
        # except for empty tensors
mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)
if mask_t.numel() == 0:
return mask_t
elif mask_t.numel() == 1:
mask_t.fill_(True)
return mask_t
if mask_t.sum() == 0:
def random_index(shape):
return tuple(map(lambda max_idx: random.randint(0, max_idx), shape))
mask_t[random_index(mask_t.shape)] = True
return mask_t
return mask_t
cases = (((M, M), (M, M), (M, M), False),
((M, 1, M), (M, M), (M, M, 1), True),
((), (), (), False),
((M, 1, M), (), (M, M, 1), True),
((), (M, M), (), True),)
for shape, mask_shape, other_shape, broadcasts_input in cases:
yield SampleInput(make_arg(shape),
args=(make_bool_mask(mask_shape), make_arg(other_shape)),
broadcasts_input=broadcasts_input)
# TODO: add reference inputs for where(condition) signature
def reference_inputs_where(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_where(op, device, dtype, requires_grad, **kwargs)
make_cond = partial(make_tensor, dtype=torch.bool, device=device, requires_grad=requires_grad)
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# noncontiguous
c = make_cond((10, 3), noncontiguous=True)
a = make_arg((10, 1), noncontiguous=True)
b = make_arg((3, 10, 3)).transpose(0, -1)
# NOTE that the OpInfo for where takes samples of the form a, cond, b
yield SampleInput(a, args=(c, b))
# type promoting
other_dtype = torch.double if dtype is not torch.double else torch.long
c = make_cond((10, 3), noncontiguous=True)
a = make_arg((10, 1), dtype=torch.long)
b = make_arg((10, 1))
yield SampleInput(a, args=(c, b))
# two python scalars
c = make_cond((10, 3), noncontiguous=True)
a = make_arg((1,)).item()
b = make_arg((1,)).item()
yield SampleInput(a, args=(c, b))
# NaN propagation
if dtype.is_floating_point or dtype.is_complex:
if dtype.is_floating_point:
nan = float('nan')
else:
# dtype.is_complex
nan = complex(float('nan'), float('nan'))
c = make_cond((1, 10, 3))
a = make_arg((10, 3), noncontiguous=True)
a[2, 1] = nan
b = make_arg((1, 3))
b[0, 2] = nan
yield SampleInput(a, args=(c, b))
# Python scalars type promotion
for scalar in (0, 0.0, 2j, False):
yield SampleInput(scalar, args=(c, b))
yield SampleInput(a, args=(c, scalar))
def error_inputs_where(op_info, device, **kwargs):
shape = (S,)
err_msg = "Expected all tensors to be on the same device"
for devices in product(('cpu', device), repeat=3):
if len(set(devices)) == 2:
si = SampleInput(make_tensor(shape, device=devices[0], dtype=torch.float32),
args=(make_tensor(shape, dtype=torch.bool, device=devices[1]),
make_tensor(shape, device=devices[2], dtype=torch.float32)))
yield ErrorInput(si, error_regex=err_msg)
def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
inputs = []
for shape in sizes:
# construct input without any non-zero elements
zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
inputs.append(zeros)
# construct input with mixed zero and non-zero elements
mixed = make_arg(shape).requires_grad_(False)
mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)
mixed[mask_t] = 0
inputs.append(mixed)
for input_t, as_tuple in product(inputs, [False, True]):
yield(SampleInput(input_t.clone().requires_grad_(requires_grad),
kwargs=dict(as_tuple=as_tuple)))
def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (2,)),
((S, S, S), (S, 1)),
((S, S, S), (S, -1)))
for case in cases:
shape, args = case
yield(SampleInput(make_arg(shape), args=args))
def reference_inputs_chunk(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_chunk(op, device, dtype, requires_grad, **kwargs)
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# shape x chunks x dim
cases = (
((13, 9, 11), 17, -1),
((13, 9, 11), 11, -1),
((13,), 12, -1),
((15,), 12, -1),
((15,), 7, 0),
((15,), 9, 0),
((3, 7), 9, 1),
((3, 7), 9, 0),
((3, 7), 2, 0),
((3, 7), 3, 0),
((3, 7), 1, 0),
((3, 7), 1, 1),
((4, 4), 2, 0),
)
for shape, chunks, dim in cases:
yield SampleInput(make_arg(shape), args=(chunks, dim))
def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)
test_cases = [
(_tensor((S, S, S)), (2,)),
(_tensor((S, S, S)), (2, 1,)),
(_tensor((S, S, S)), (2, -1,)),
(_tensor((S, S, S)), (2, 1, True,)),
(_tensor((S, S, S)), (2, -1, True,)),
(_tensor((S,)), (2, 0,)),
(_tensor((S,)), (2, 0, True,)),
(_tensor(()), (1,)),
(_tensor(()), (1, 0,)),
(_tensor(()), (1, 0, True))
]
return [SampleInput(tensor, args=args) for tensor, args in test_cases]
def error_inputs_kthvalue(op_info, device, **kwargs):
    # tests that an `out=` tensor overlapping the input fails
t = make_tensor(10, dtype=torch.float32, device=device)
indices = torch.empty((), device=device, dtype=torch.long)
si = SampleInput(t, args=(5,), kwargs={'out': (t, indices)})
k_out_of_range_err = "selected number k out of range for dimension"
return (ErrorInput(si, error_regex="unsupported operation"),
ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3, 0)),
error_regex=k_out_of_range_err),
ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3,)),
error_regex=k_out_of_range_err),
ErrorInput(SampleInput(torch.tensor(2, device=device), args=(3,)),
error_regex=k_out_of_range_err),)
def sample_inputs_dropout(op_info, device, dtype, requires_grad, *,
train=None, valid_input_dim=None, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
if valid_input_dim:
cases = ((S,) * i for i in valid_input_dim)
else:
cases = ((S, S), (S,), ())
p_vals = [0.0, 0.5, 1.0]
    # This handles the special case of feature_alpha_dropout, which has different
    # supported dtypes depending on the `train` parameter
training_vals = [train] if train is not None else [True, False]
for case, p, training in product(cases, p_vals, training_vals):
yield SampleInput(make_arg(case), kwargs=dict(p=p, training=training))
yield SampleInput(make_arg(case), kwargs=dict())
def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape):
return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def make_long_input(shape, *, low, high, noncontiguous=False):
return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high,
noncontiguous=noncontiguous)
def make_per_sample_weight(flag, idx):
# a tensor of float / double weights, or None
# to indicate all weights should be taken to be 1
if flag:
return make_input(idx.shape)
return None
offsets = torch.tensor([0, 3], device=device, dtype=torch.long)
for generate_per_sample_weight in (True, False):
for mode in ('sum', 'mean', 'max'):
# per_sample_weights is only supported for mode='sum' (got mode='****')
if generate_per_sample_weight and mode in ('mean', 'max'):
continue
# 1-D index tensor
idx = make_long_input((S,), low=0, high=M)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'offsets': offsets, 'mode': mode,
'per_sample_weights': per_sample_weights})
idx = make_long_input((S,), low=0, high=M, noncontiguous=True)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'offsets': offsets, 'mode': mode,
'per_sample_weights': per_sample_weights})
# bag with zero length
idx = make_long_input((S,), low=0, high=M, noncontiguous=True)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long),
'mode': mode,
'per_sample_weights': per_sample_weights})
# 2-D index tensor
idx = make_long_input((S, S), low=0, high=M)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})
idx = make_long_input((S, S), low=0, high=M, noncontiguous=True)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})
# The gradient vector at `padding_idx` is not updated.
# Negative padding_idx
idx = make_long_input((6,), low=0, high=S)
idx[0] = 4
idx[4] = 4
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((S, S)), args=(idx,),
kwargs={'padding_idx': -1, 'offsets': offsets,
'mode': mode, 'per_sample_weights': per_sample_weights},)
idx = make_long_input((3, 3), low=0, high=S)
# Positive padding_idx
idx[0, 0] = 2
idx[1, 1] = 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((S, S)), args=(idx,),
kwargs={'padding_idx': 2, 'mode': mode,
'per_sample_weights': per_sample_weights},)
idx = make_long_input((6, ), low=0, high=S)
weights = make_input((S, S))
offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},)
if not requires_grad:
                # The following inputs return a gradient different from the numerical gradient.
                # This is expected, and relevant tests are present in `test_nn.py`.
# Due to inplace renorming of weight, the numerical gradient doesn't match the
# analytical gradient.
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S)) * 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'max_norm': 1., 'mode': mode,
'per_sample_weights': per_sample_weights},)
idx = make_long_input((6, ), low=0, high=S)
weights = make_input((S, S)) * 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'max_norm': 1., 'norm_type': 1.0,
'mode': mode, 'offsets': offsets,
'per_sample_weights': per_sample_weights},)
if mode != 'max':
# Scale the gradient based on the inverse frequency of a particular index.
                    # Note: max mode does not support sparse weights
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 1
idx[0, 1] = 1
weights = make_input((S, S))
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'scale_grad_by_freq': True, 'mode': mode,
'per_sample_weights': per_sample_weights},)
# gradcheck not implemented for sparse tensors.
# Note : max mode does not support sparse weights
idx = make_long_input((6, ), low=0, high=S)
weights = make_input((S, S))
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'sparse': True, 'offsets': offsets,
'mode': mode, 'per_sample_weights': per_sample_weights})
idx = make_long_input((6, ), low=0, high=S)
idx[0] = 1 # freq more than 1
idx[1] = 1 # freq more than 1
idx[3] = 0 # padding_idx
weights = make_input((S, S)) * 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0,
'max_norm': 1., 'offsets': offsets,
'mode': mode, 'per_sample_weights': per_sample_weights})
def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape):
return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def make_long_input(shape, *, low, high):
return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high)
# 0-D index tensor
idx = make_long_input((), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
# 1-D index tensor
idx = make_long_input((S,), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
# 2-D index tensor
idx = make_long_input((S, S), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
if not requires_grad:
        # The following inputs return a gradient different from the numerical gradient.
        # This is expected, and relevant tests are present in `test_nn.py`.
# The gradient vector at `padding_idx` is not updated.
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 2
idx[1, 1] = 2
yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},)
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 4
idx[1, 1] = 4
yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},)
# Due to inplace renorming of weight, the numerical gradient doesn't match the
# analytical gradient.
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S)) * 2
yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},)
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S)) * 2
yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},)
# Scale the gradient based on the inverse frequency of a particular index.
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 1
idx[0, 1] = 1
weights = make_input((S, S))
yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},)
# gradcheck not implemented for sparse tensors.
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S))
yield SampleInput(weights, args=(idx,), kwargs={'sparse': True})
idx = make_long_input((3, 3), low=0, high=S)
idx[0, 0] = 1 # freq more than 1
idx[0, 1] = 1 # freq more than 1
idx[1, 0] = 0 # padding_idx
weights = make_input((S, S)) * 2
yield SampleInput(weights, args=(idx,),
kwargs={'sparse': True, 'scale_grad_by_freq': True,
'padding_idx': 0, 'max_norm': 1.})
def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape, *, low, high):
return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad)
shapes = ((), (S,), (L, M, S))
num_classess = (-1, 10)
return [
SampleInput(
make_input(
shape,
low=0,
high=10 if num_classes == -1 else num_classes // 2,
),
kwargs=dict(num_classes=num_classes),
)
for shape, num_classes in itertools.product(shapes, num_classess)
]
def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs):
make_arg = make_fullrank_matrices_with_distinct_singular_values
def make_input():
return make_arg(12, 12, device=device, dtype=dtype, requires_grad=requires_grad)
# lhs / rhs shape can have any number of dimensions as long as their product equals 12
shapes = [
((2, 2, 3), (12, 1)),
((4, 3), (6, 1, 2)),
]
samples = []
for shape_lhs, shape_rhs in shapes:
inp = make_input().reshape(*shape_lhs, *shape_rhs).detach()
inp.requires_grad_(requires_grad)
samples.append(SampleInput(inp, kwargs=dict(ind=len(shape_lhs))))
return samples
def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs):
a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
# Zero-dim tensors are not supported in NumPy, so we skip them for now.
# NumPy is used in reference check tests.
# See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix.
# a_shapes += [(0, 0, 1, 2, 3, 0)]
dimss = [None, (0, 2)]
for a_shape, dims in itertools.product(a_shapes, dimss):
a = make_tensor(a_shape, dtype=dtype, device=device, requires_grad=requires_grad)
b = make_tensor(a_shape[:2], dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(a, args=(b,), kwargs=dict(dims=dims))
def sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs):
rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad)
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # Although most losses also support the `size_average` and `reduce` combination instead of
    # `reduction`, that combination is deprecated since 0.4.1 and thus is not tested
shapes_and_kwargs = (
((), None),
((S,), dict(reduction="mean")),
((S,), dict(reduction="sum")),
((S,), dict(reduction="none")),
((S, S), None),
((S, S, S), None),
)
for shape, kwargs in shapes_and_kwargs:
yield SampleInput(_make_tensor(shape),
args=(_make_tensor(shape, requires_grad=rhs_requires_grad),),
kwargs=kwargs)
def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
batch_size = 2
num_channels = 3
modes = ("bilinear", "nearest")
align_cornerss = (False, True)
padding_modes = ("zeros", "border", "reflection")
sample_inputs = []
for dim in (2, 3):
modes_ = (*modes, "bicubic") if dim == 2 else modes
for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss):
sample_inputs.append(
SampleInput(
_make_tensor((batch_size, num_channels, *[S] * dim)),
args=(_make_tensor((batch_size, *[S] * dim, dim)),),
kwargs=dict(
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
)
)
)
return sample_inputs
def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def make_target(shape):
shape = () if len(shape) == 1 else (shape[0], )
t = torch.randint(0, 2, shape, device=device, dtype=torch.long)
# Label with -1 or 1
t = t * 2 - 1
target = t.to(dtype=dtype).detach_().requires_grad_(requires_grad)
return target
shapes = ((S, S), (S,))
reductions = ('none', 'mean', 'sum')
for s, r in product(shapes, reductions):
yield SampleInput(
make_input(s),
args=(make_input(s), make_target(s)),
kwargs=dict(reduction=r, margin=random.uniform(-1, 1))
)
def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs):
input_length = 50
batch = 16
num_char = 20
target_length = 30
def make_log_probs(s):
t = make_tensor(s, device=device, dtype=dtype)
log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad)
return log_probs
reductions = ('none', 'mean', 'sum')
zero_inf = (True, False)
for r, z in product(reductions, zero_inf):
log_probs = make_log_probs((input_length, batch, num_char))
targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device)
input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device)
target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device)
yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z))
def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
shape = (2, 3)
num_classes = shape[1]
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# FIXME: Derivative wrt. weight not implemented
make_weight = partial(make_tensor, num_classes, device=device, dtype=dtype, requires_grad=False)
def make_target(shape, zeros=False):
s = (shape[0], *shape[2:]) if len(shape) > 1 else ()
if zeros:
return torch.zeros(s, device=device, dtype=torch.long)
else:
return make_tensor(s,
low=0,
high=shape[1] if len(shape) > 1 else shape[0],
device=device,
dtype=torch.long)
def gen_shape_kwargs():
# Batched, non-batched and 2d
shapes = (shape, (num_classes,), shape + (2, 2))
reductions = ('none', 'mean', 'sum')
for reduction, s in product(reductions, shapes):
yield make_input(s), make_target(s), dict(reduction=reduction)
yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction)
yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction)
yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction)
t = make_target(s)
ignore = num_classes // 2
# If "mean", nll returns NaN, so it's not differentiable at those points
if t.eq(ignore).all() and reduction == "mean":
t.fill_(0)
yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction)
yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction, weight=make_weight())
# Test ignoring all the targets
# If "mean", nll returns NaN, so it's not differentiable at those points
if reduction != "mean":
yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction)
for input, target, kwargs in gen_shape_kwargs():
yield SampleInput(input, args=(target,), kwargs=kwargs)
def sample_inputs_binary_cross_entropy_with_logits(
op_info, device, dtype, requires_grad, **kwargs
):
make = partial(make_tensor, device=device, dtype=dtype)
make_prob = partial(make, low=0, high=1)
reductions = ("mean", "sum", "none")
def make_weight_shape_kwargs():
kwargs = []
for shape in ((1,), (1, S), (S), (S, S)):
kwargs.extend([((S, S), dict(reduction=reduction, weight=make(shape))) for reduction in reductions])
return kwargs
shapes_and_kwargs = [
*[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))],
*[((S, S), dict(reduction=reduction)) for reduction in reductions],
*make_weight_shape_kwargs(),
*[((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions],
*[((S, S), dict(reduction=reduction, weight=make((S, S)), pos_weight=make((S,), low=0))) for reduction in reductions],
]
for shape, kwargs in shapes_and_kwargs:
yield SampleInput(
make(shape, requires_grad=requires_grad),
args=(make_prob(shape, requires_grad=requires_grad),),
kwargs=kwargs,
)
def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs):
yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad))
mask = torch.tensor([[0, 1, 0, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 1, 0],
[1, 0, 1, 1, 0],
[1, 0, 0, 1, 0]], dtype=torch.bool, device=device)
t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)
t[mask] = 0
yield SampleInput(t)
t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)
t[mask] = 0
yield SampleInput(t)
t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(t)
yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad))
yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad))
def _generate_sample_shape_reduction():
shapes = ((S,), (S, S), (S, S, S))
reductions = ('none', 'mean', 'sum')
for s, r in product(shapes, reductions):
yield s, r
def sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Set low slightly above 0 so gradcheck doesn't accidentally dip below 0
make_var = partial(make_tensor, low=0.1, device=device, dtype=dtype, requires_grad=requires_grad)
def gen_shape(shape):
yield shape
# Broadcast
yield (*shape[:-1], 1)
yield shape[:-1]
def gen_shape_kwargs():
for s, r in _generate_sample_shape_reduction():
for t_s, v_s in product(gen_shape(s), gen_shape(s)):
yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r)
yield (
_make_tensor(s), _make_tensor(t_s), make_var(v_s),
dict(full=True, reduction=r)
)
yield (
_make_tensor(s), _make_tensor(t_s), make_var(v_s),
dict(eps=random.uniform(1e-6, 1e-3), reduction=r)
)
yield (
_make_tensor(s), _make_tensor(t_s), make_var(v_s),
dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r)
)
for input, target, var, kwargs in gen_shape_kwargs():
yield SampleInput(input, args=(target, var, ), kwargs=kwargs)
def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
for s, r in _generate_sample_shape_reduction():
yield _make_tensor(s), _make_tensor(s), dict(reduction=r)
def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
# target should contain either 1 or -1 as per docs
mask = torch.rand_like(target) > 0.5
target[mask] = 1
target[~mask] = -1
d['margin'] = random.uniform(-9, 9)
yield SampleInput(input, args=(target, ), kwargs=d)
# scalar input and target.
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(_make_tensor(()), args=(_make_tensor(()), ))
def error_inputs_hinge_embedding_loss(op, device, **kwargs):
make_input = partial(make_tensor, device=device, dtype=torch.float32)
# invalid reduction value
yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}),
error_type=ValueError, error_regex='is not a valid value')
def reference_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs)
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
for reduction in ('sum', 'mean', 'none'):
if dtype.is_floating_point: # only supports ints and floats
# NaN propagation
inp = make_input((10, ))
inp[2] = float('nan')
target = make_input((10, ))
# target should contain either 1 or -1 as per docs
mask = torch.rand_like(target) > 0.5
target[mask] = -1
target[~mask] = 1
yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction})
# Inf Handling
inp = make_input((10, ))
inp[4] = float('inf')
target = make_input((10, ))
mask = torch.rand_like(target) > 0.5
target[mask] = -1
target[~mask] = 1
yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction})
# Broadcasting
inp = make_input((5, 5))
target = make_input((1, 5))
mask = torch.rand_like(target) > 0.5
target[mask] = -1
target[~mask] = 1
yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction})
def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs):
for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
d['delta'] = random.uniform(1e-3, 9)
yield SampleInput(input, args=(target, ), kwargs=d)
def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def gen_shape_kwargs():
for s, r in _generate_sample_shape_reduction():
for li in (True, False):
for f in (True, False):
i1 = _make_tensor(s)
i2 = _make_tensor(s)
                    # For Poisson NLL loss, the target is assumed to be drawn
                    # from a Poisson distribution, so its samples are non-negative.
t1 = _make_tensor(s, low=0)
t2 = _make_tensor(s, low=0)
if not li:
i1.abs_()
i2.abs_()
t1.abs_()
t2.abs_()
yield (
i1, t1,
dict(log_input=li, full=f, reduction=r)
)
yield (
i2, t2,
dict(log_input=li, full=f,
eps=random.uniform(1e-8, 1e-3),
reduction=r)
)
for input, target, kwargs in gen_shape_kwargs():
yield SampleInput(input, args=(target, ), kwargs=kwargs)
def sample_inputs_triplet_margin_loss(op_info, device, dtype, requires_grad, with_distance=False, **kwargs):
make = partial(make_tensor, (S, M), device=device, dtype=dtype, requires_grad=requires_grad)
kwargss = (
*[dict(margin=margin) for margin in (1e-6, 1.0, 10.0)],
dict(swap=True),
*[dict(reduction=reduction) for reduction in ("mean", "sum", "none")],
)
for kwargs in kwargss:
input = make()
args = (make(), make())
if with_distance:
kwargs["distance_function"] = torch.nn.PairwiseDistance()
yield SampleInput(input, args=args, kwargs=kwargs)
def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs):
make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shape = (3,)
batched_shape = (2, *shape)
shapes_and_kwargs = [
(shape, None),
(batched_shape, None),
(shape, dict(keepdim=True)),
(batched_shape, dict(keepdim=True)),
(shape, dict(p=5.0)),
(shape, dict(p=-1.0)),
(shape, dict(eps=1.0)),
]
return [
SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs
]
def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs):
return [
SampleInput(
make_tensor((1, 9, 2, 2), device=device, dtype=dtype, requires_grad=requires_grad),
kwargs=dict(upscale_factor=upscale_factor),
)
for upscale_factor in (1, 3)
]
def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs):
return [
SampleInput(
make_tensor((1, 1, 6, 6), device=device, dtype=dtype, requires_grad=requires_grad),
kwargs=dict(downscale_factor=downscale_factor),
)
for downscale_factor in (1, 3)
]
def sample_inputs_binary_cross_entropy(op_info, device, dtype, requires_grad, logits=False, **kwargs):
make = partial(make_tensor, device=device, dtype=dtype)
make_prob = partial(make, low=0, high=1)
reductions = ("mean", "sum", "none")
shapes_and_kwargs = [
*[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))],
*[((S, S), dict(reduction=reduction)) for reduction in reductions],
*[((S, S), dict(reduction=reduction, weight=make((S, S)))) for reduction in reductions],
]
if logits:
shapes_and_kwargs.extend(
[((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions]
)
for shape, kwargs in shapes_and_kwargs:
yield SampleInput(
(make if logits else make_prob)(shape, requires_grad=requires_grad),
args=(make_prob(shape, requires_grad=requires_grad),),
kwargs=kwargs,
)
def sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs):
samples = []
sample_shapes = [(), (S), (S, S, S)]
atols = [1e-2, 1e-16]
rtols = [1e-1, 0.5]
eps = 1e-8
for s, rtol, atol in product(sample_shapes, rtols, atols):
# close sample
t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)
close = (t + atol).detach().requires_grad_(requires_grad)
close_sample = SampleInput(t, args=(close,), kwargs=dict(rtol=rtol, atol=atol))
samples.append(close_sample)
# random sample
a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)
b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)
r_sample = SampleInput(a, args=(b,), kwargs=dict(rtol=rtol, atol=atol))
samples.append(r_sample)
return samples
def sample_inputs_l1_loss(op_info, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs)
# In addition to the regular test cases, we add two for mixed floating point and complex inputs
if dtype.is_complex:
make = partial(make_tensor, (), device=device, requires_grad=requires_grad)
yield SampleInput(make(dtype=dtype), args=(make(dtype=torch.double),))
yield SampleInput(make(dtype=torch.double), args=(make(dtype=dtype),))
def sample_inputs_smooth_l1_loss(op_info, device, dtype, requires_grad, **kwargs):
yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs)
make = partial(make_tensor, (S, S), device=device, dtype=dtype, requires_grad=requires_grad)
    # This test case always triggers the smooth condition, since the absolute difference between
    # input and target is smaller than beta
yield SampleInput(make(low=0, high=2), args=(make(low=-2, high=0),), kwargs=dict(beta=5))
yield SampleInput(make(), args=(make(),), kwargs=dict(beta=0))
def sample_inputs_kl_div(op_info, device, dtype, requires_grad, **kwargs):
make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shapes_and_reduction = [
((2,), "mean"),
((2, 3), "mean"),
((2, 3, 4), "mean"),
((2,), "none"),
((2,), "batchmean"),
((2,), "sum"),
]
sample_inputs = []
for (shape, reduction), log_target in itertools.product(shapes_and_reduction, (True, False)):
# input should be log-probability, i.e. lie in (-inf, 0]
input = make(shape, low=None, high=0)
# target should be a probability by default, i.e. lie in [0, 1], and a log-probability if log_target is set,
# i.e. lie in (-inf, 0]
target = make(shape, low=None, high=0) if log_target else make(shape, low=0, high=1)
sample_inputs.append(
SampleInput(input, args=(target,), kwargs=dict(reduction=reduction, log_target=log_target))
)
return sample_inputs
def sample_inputs_pdist(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
yield from (SampleInput(make_input((n, m))) for n, m in itertools.product((1, S), repeat=2))
yield from (SampleInput(make_input((S, S)), kwargs=dict(p=p)) for p in (0.0, 1.0, 2.0, 10.0, float("inf")))
def reference_pdist(input, p=2):
pdist = scipy.spatial.distance.pdist
if p == 0:
output = pdist(input, "hamming") * input.shape[1]
elif p == float("inf"):
output = pdist(input, lambda x, y: np.abs(x - y).max())
else:
output = pdist(input, "minkowski", p=p)
return output.astype(input.dtype)
def sample_inputs_diagflat(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
SampleInput(make_input(())),
SampleInput(make_input((2,))),
SampleInput(make_input((2, 2))),
SampleInput(make_input((2,)), kwargs=dict(offset=1)),
SampleInput(make_input((2,)), kwargs=dict(offset=-1)),
]
def sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs):
unpool_name_to_pool_method_dict = {
'nn.functional.max_unpool1d': torch.nn.functional.max_pool1d,
'nn.functional.max_unpool2d': torch.nn.functional.max_pool2d,
'nn.functional.max_unpool3d': torch.nn.functional.max_pool3d
}
unpool_name_to_dim = {
'nn.functional.max_unpool1d': 1,
'nn.functional.max_unpool2d': 2,
'nn.functional.max_unpool3d': 3
}
unpool_to_pool_name_dict = dict((
(k, f'nn.functional.{v.__name__}') for k, v in unpool_name_to_pool_method_dict.items()
))
pool_dim = unpool_name_to_dim[op_info.name]
pool_method = unpool_name_to_pool_method_dict[op_info.name]
pool_op_info = copy.copy(op_info)
pool_op_info.name = unpool_to_pool_name_dict[op_info.name]
for sample in sample_inputs_max_pool(pool_op_info, device, dtype, requires_grad, **kwargs):
# shapes (C, ...) do not work as of now,
# see https://github.com/pytorch/pytorch/issues/68337
# TODO: remove once the issue is resolved
if sample.input.dim() != pool_dim + 2:
continue
# No dilation > 1 for max_unpool,
# see https://github.com/pytorch/pytorch/issues/68420
if sample.kwargs['dilation'] != 1:
continue
# Can't unpool without indices
if sample.kwargs['return_indices']:
pool, indices = pool_method(sample.input, **sample.kwargs)
# arg has to be a leaf
arg = pool.detach().requires_grad_(requires_grad)
sample_kwargs = {
'kernel_size': sample.kwargs['kernel_size'],
'stride': sample.kwargs['stride'],
'padding': sample.kwargs['padding'],
# output_size could be None but we specify it explicitly
                # to compensate for the information loss in pooling due
                # to the floor/ceil operation used to compute the output shapes
'output_size': sample.input.size()
}
yield SampleInput(arg, args=(indices,), kwargs=sample_kwargs)
def sample_inputs_max_unpool_grad(op_info, device, dtype, requires_grad, **kwargs):
for sample in sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs):
indices = sample.args[0]
# The samples for max_unpool are generated with max_pool.
# It could be that a single element from the max_pool's
# input is mapped to several locations in its output.
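        # For example, with overlapping pooling windows (stride smaller than kernel_size),
        # the same input element can be the argmax of two windows, yielding duplicate indices.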
# This situation leads to failed gradchecks because
        # the finite difference algorithm perturbs the elements
        # of the output one by one, rather than over equivalence
        # classes determined by whether two elements in the output
        # come from the same location in the input (simply put,
        # whether they have the same corresponding index).
        # So, there are two ways to resolve this issue:
        # 1. Extract a perturbation for one element and apply it to
        #    all the elements from the same equivalence class, or
        # 2. Make sure that the equivalence classes are all singletons,
        #    i.e. the index tensor has to be comprised of only unique
        #    indices.
        # Here we go with solution 2, the easiest of all.
if indices.unique().numel() == indices.numel():
yield sample
foreach_unary_op_db: List[OpInfo] = [
ForeachFuncInfo('exp'),
ForeachFuncInfo('acos'),
ForeachFuncInfo('asin'),
ForeachFuncInfo('atan'),
ForeachFuncInfo('cos'),
ForeachFuncInfo('cosh'),
ForeachFuncInfo('log'),
ForeachFuncInfo('log10'),
ForeachFuncInfo('log2'),
ForeachFuncInfo('tan'),
ForeachFuncInfo('tanh'),
ForeachFuncInfo('sin'),
ForeachFuncInfo('sinh'),
ForeachFuncInfo(
'neg',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex(),
sample_inputs_func=sample_inputs_foreach,
),
ForeachFuncInfo(
'sqrt',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
),
ForeachFuncInfo(
'ceil',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'erf',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'erfc',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'expm1',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'floor',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'log1p',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'round',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'frac',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'reciprocal',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'sigmoid',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'trunc',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'abs',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
]
foreach_binary_op_db: List[OpInfo] = [
ForeachFuncInfo(
"add",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
),
ForeachFuncInfo(
"sub",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
),
ForeachFuncInfo(
"mul",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
skips=(
# Ref: https://github.com/pytorch/pytorch/issues/77946
DecorateInfo(unittest.skip("Unable to reproduce failure locally"), "TestForeach",
"test_binary_op_scalarlist_fastpath",
device_type='cuda', dtypes=(torch.float16,)),
)
),
ForeachFuncInfo(
"div",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
skips=(
# Ref: https://github.com/pytorch/pytorch/issues/77946
DecorateInfo(unittest.skip("Unable to reproduce failure locally"), "TestForeach",
"test_binary_op_scalarlist_fastpath",
device_type='cuda', dtypes=(torch.float16,)),
)
),
]
foreach_pointwise_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"addcmul",
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
"addcdiv",
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
),
]
foreach_minmax_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"maximum",
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
),
ForeachFuncInfo(
"minimum",
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
),
]
foreach_reduce_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"norm",
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
),
]
def reference_sign(x):
if x.dtype == np.bool_:
# `np.sign` doesn't support `bool`.
# >>> np.sign(True)
# ufunc 'sign' did not contain a loop
# with signature matching types dtype('bool') -> dtype('bool')
return np.sign(x, dtype=np.uint8).astype(np.bool_)
return np.sign(x)
def reference_sgn(x):
# NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex.
# For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j.
    # while `torch.sgn` returns 0 if abs(input) == 0 else input / abs(input)
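    # Illustrative example: for x = 3 + 4j, this reference returns 0.6 + 0.8j (x / |x|),
    # whereas np.sign(x) would return 1 + 0j.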
if x.dtype not in [np.complex64, np.complex128]:
return reference_sign(x)
out = (x / np.abs(x))
if out.ndim == 0:
# Handle x == 0 case
if (x == 0):
# Can't assign to np.complex object
# So make a new one.
return np.array(complex(0, 0), dtype=x.dtype)
return out
# Handle x == 0 case
mask = (x == 0)
out[mask] = complex(0, 0)
return out
def reference_sigmoid(x):
# 'scipy.special.expit' not supported for the input types
if x.dtype in [np.complex64, np.complex128]:
return (1 / (1 + np.exp(-x)))
return scipy.special.expit(x)
def reference_logsigmoid(x):
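    # logsigmoid(x) = -log(1 + exp(-x)); the two branches below are the numerically stable
    # evaluation, keeping the exp() argument non-positive to avoid overflow.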
return np.where(
x < 0,
x - np.log1p(np.exp(x)),
-np.log1p(np.exp(-x)))
def reference_hardsigmoid(x):
intermediate = x / 6 + 0.5
y = np.clip(intermediate, 0, None)
return np.where(y > 1, 1, y).astype(x.dtype)
def reference_lgamma(x):
    # scipy.special.gammaln returns `-inf` when the input is `-inf`,
    # while PyTorch, C, and C++ all return `inf` for a `-inf` input.
# Reference:
# https://en.cppreference.com/w/cpp/numeric/math/lgamma
# https://en.cppreference.com/w/c/numeric/math/lgamma
# To handle the above discrepancy,
# we replace -inf with inf so values
# that were originally -inf map to inf as expected
if x.dtype.kind == 'f':
x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)
out = scipy.special.gammaln(x)
if x.dtype == np.float16:
        # `scipy.special.gammaln` returns a float32 output when the input is float16,
        # while `torch.lgamma` preserves `float16`. Due to the smaller range of float16,
        # the PyTorch version outputs `inf` where SciPy returns finite values.
out = out.astype(np.float16)
return out
def reference_polygamma(x, n):
# WEIRD `scipy.special.polygamma` behavior
# >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype
# dtype('float64')
# >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype
# dtype('float32')
#
# Thus we cast output to the default torch dtype or preserve double
result_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
if x.dtype == np.double:
result_dtype = np.double
return scipy.special.polygamma(n, x).astype(result_dtype)
def reference_mvlgamma(x, d):
if x.dtype == np.float16:
return scipy.special.multigammaln(x, d).astype(np.float16)
return scipy.special.multigammaln(x, d)
def reference_softplus(input, beta=1, threshold=20):
non_linear = input * beta <= threshold
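    # For input * beta > threshold the output is left as the raw input, mirroring the linear
    # regime that torch.nn.functional.softplus switches to above `threshold` for numerical stability.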
output = input.copy()
output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta
return output
def reference_gelu(X, *, approximate='none'):
def _gelu_ref(X):
return X * stats.norm.cdf(X)
def _tanh_gelu_ref(X):
M_SQRT_2_PI = math.sqrt(2 / math.pi)
Z = M_SQRT_2_PI * (X + 0.044715 * np.power(X, 3.0))
return 0.5 * X * (1.0 + np.tanh(Z))
if approximate == 'tanh':
return _tanh_gelu_ref(X)
else:
return _gelu_ref(X)
def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray:
if num_classes == -1:
num_classes = int(np.amax(a) + 1)
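    # Scatter ones via flat indexing: row i of the (a.size, num_classes) matrix
    # gets a one at column a.flat[i].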
idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes
one_hot = np.zeros((a.size, num_classes), dtype=a.dtype)
np.put(one_hot, idcs, 1)
return one_hot.reshape(*a.shape, -1)
def reference_mse_loss(input, target, reduction="mean"):
se = (input - target) ** 2
if reduction == "mean":
return np.mean(se)
elif reduction == "sum":
return np.sum(se)
else: # reduction == "none"
return se
def wrapper_set_seed(op, *args, **kwargs):
"""Wrapper to set seed manually for some functions like dropout
See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details.
"""
with freeze_rng_state():
torch.manual_seed(42)
return op(*args, **kwargs)
def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5):
return reference_native_layer_norm(inp, normalized_shape, weight, bias, eps)[0]
def reference_native_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight, bias, eps):
feature_size = np.prod(normalized_shape)
inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload]
mean = inp_view.mean(axis=-1, keepdims=True)
var = inp_view.var(axis=-1, ddof=0, keepdims=True)
Y = (inp_view - mean) / np.sqrt(var + eps)
if weight is None and bias is not None:
Y = Y + bias.reshape(-1)
elif weight is not None and bias is None:
Y = Y * weight.reshape(-1)
elif weight is not None and bias is not None:
Y = Y * weight.reshape(-1) + bias.reshape(-1)
axis = inp.ndim - len(normalized_shape)
stat_shape = inp.shape[:axis] + (1,) * len(normalized_shape)
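    # Return (output, mean, rstd) with rstd = 1 / sqrt(var + eps), mirroring the three
    # outputs of torch.native_layer_norm.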
return Y.reshape(*inp.shape), mean.reshape(stat_shape), (1.0 / np.sqrt(var + eps)).reshape(stat_shape)
def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5):
inp_view = inp
if np.prod(inp.shape) != 0:
inp_view = inp.reshape((inp.shape[0], num_groups, -1))
mean = inp_view.mean(axis=-1, keepdims=True)
var = inp_view.var(axis=-1, ddof=0, keepdims=True)
Y = (inp_view - mean) / np.sqrt(var + eps)
Y = Y.reshape(inp.shape)
if weight is not None:
# weight is a vector of length equal to the channel
if len(Y.shape) > 2:
weight = np.tile(np.expand_dims(weight, 1), [1] + list(inp.shape[2:]))
Y = Y * weight
if bias is not None:
# bias is a vector of length equal to the channel
if len(Y.shape) > 2:
bias = np.tile(np.expand_dims(bias, 1), [1] + list(inp.shape[2:]))
Y = Y + bias
return Y
# We use a custom reference function since NumPy only has a string `side` arg (instead of both
# `right` and `side`) and doesn't have an `out_int32` arg. Additionally, NumPy doesn't support
# searchsorted with ND arrays, so this splits those into stacked 1D cases.
def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None):
side = 'right' if (right or side == 'right') else 'left'
if len(sorted_sequence.shape) == 1 :
ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter)
return ret.astype(np.int32) if out_int32 else ret
elif sorted_sequence.shape[0] == 0:
if sorter is not None:
sorter = sorter.flatten()
ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter)
ret = ret.astype(np.int32) if out_int32 else ret
return ret.reshape(boundary.shape)
else:
# numpy searchsorted only supports 1D inputs so we split up ND inputs
orig_shape = boundary.shape
num_splits = np.prod(sorted_sequence.shape[:-1])
splits = range(0, num_splits)
sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1)
if sorter is not None:
sorter = sorter.reshape(num_splits, -1)
split_sequence = [sorted_sequence[i] for i in splits]
split_boundary = [boundary[i] for i in splits]
split_sorter = [sorter[i] if (sorter is not None) else None for i in splits]
split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort)
for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)]
split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret
return np.stack(split_ret).reshape(orig_shape)
def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
"""Gradcheck wrapper for functions that take Hermitian matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the Hermitian property of the input.
"""
return op(input + input.mH, *args, **kwargs)
def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs):
"""Gradcheck wrapper for functions that take lower or upper triangular matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the triangular property of the input.
    `idx` is used to specify which `args[idx]` is to be triangularized.
"""
triangular_arg = args[idx].triu() if upper else args[idx].tril()
return op(*args[:idx], triangular_arg, *args[idx + 1:], upper, **kwargs)
def gradcheck_wrapper_triangular_input_real_positive_diagonal(op, *args, upper=False, idx=0, **kwargs):
"""Gradcheck wrapper for functions that take lower/upper triangular matrices
with real and positive diagonals, for example, cholesky-like operations.
"""
arg = args[idx]
arg_diag = arg.diagonal(0, -2, -1)
arg_diag_embed = torch.diag_embed(arg_diag)
id_diag_tensor = torch.ones_like(arg_diag)
id_tensor = torch.diag_embed(id_diag_tensor)
# new_arg = arg - diag(arg) + I
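    # i.e. the diagonal is replaced with ones, which are real and positive as required.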
new_arg = arg - arg_diag_embed + id_tensor
return gradcheck_wrapper_triangular_input(
op, *args[:idx], new_arg, *args[idx + 1:],
upper=upper, idx=idx, **kwargs
)
def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
"""Gradcheck wrapper for masked operations.
When mask is specified, replaces masked-out elements with zeros.
Use for operations that produce non-finite masked-out elements,
for instance, for minimum and maximum reductions.
"""
output = op(input, *args, **kwargs)
mask = kwargs.get('mask')
if mask is not None:
output_mask = torch._masked._output_mask(op, input, *args, **kwargs)
output = torch.where(output_mask, output, output.new_zeros([]))
return output
def gradcheck_wrapper_masked_pointwise_operation(op, input, *args, **kwargs):
"""Gradcheck wrapper for masked pointwise operations. Assumes that the result
will be masked iff both tensors are masked at a specific index.
When input_mask and other_mask are specified, replaces masked-out elements with zeros.
Use for operations that produce non-finite masked-out elements,
for instance, for minimum and maximum reductions.
"""
output = op(input, *args, **kwargs)
input_mask = kwargs.get('input_mask')
other_mask = kwargs.get('other_mask')
if input_mask is not None and other_mask is not None:
combined_mask = torch.logical_and(input_mask, other_mask)
new_kwargs = dict(mask=combined_mask, **kwargs)
output_mask = torch._masked._input_mask(input, *args, **new_kwargs)
output = torch.where(output_mask, output, output.new_zeros([]))
return output
def reference_reduction_numpy(f, supports_keepdims=True):
"""Wraps a NumPy reduction operator.
The wrapper function will forward dim, keepdim, mask, and identity
kwargs to the wrapped function as the NumPy equivalent axis,
keepdims, where, and initial kwargs, respectively. An unbiased kwarg,
if present, is forwarded as NumPy's ddof.
Args:
f: NumPy reduction operator to wrap
supports_keepdims (bool, optional): Whether the NumPy operator accepts
keepdims parameter. If it does not, the wrapper will manually unsqueeze
the reduced dimensions if it was called with keepdim=True. Defaults to True.
Returns:
Wrapped function
"""
@wraps(f)
def wrapper(x: np.ndarray, *args, **kwargs):
# Copy keys into a set
keys = set(kwargs.keys())
dim = kwargs.pop('dim', None)
keepdim = kwargs.pop('keepdim', False)
if 'dim' in keys:
dim = tuple(dim) if isinstance(dim, Sequence) else dim
# NumPy reductions don't accept dim=0 for scalar inputs,
# so we convert dim to None if and only if it is equivalent (0, -1, (0,), or (-1,))
if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}:
kwargs['axis'] = None
else:
kwargs['axis'] = dim
if 'keepdim' in keys and supports_keepdims:
kwargs['keepdims'] = keepdim
if 'mask' in keys:
mask = kwargs.pop('mask')
if mask is not None:
assert mask.layout == torch.strided
kwargs['where'] = mask.cpu().numpy()
if 'identity' in keys:
identity = kwargs.pop('identity')
if identity is not None:
if identity.dtype is torch.bfloat16:
identity = identity.cpu().to(torch.float32)
else:
identity = identity.cpu()
kwargs['initial'] = identity.numpy()
if 'unbiased' in keys:
unbiased = kwargs.pop('unbiased')
if unbiased is not None:
kwargs['ddof'] = int(unbiased)
result = f(x, *args, **kwargs)
# Unsqueeze reduced dimensions if NumPy does not support keepdims
if keepdim and not supports_keepdims and x.ndim > 0:
dim = list(range(x.ndim)) if dim is None else dim
result = np.expand_dims(result, dim)
return result
return wrapper
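# Illustrative usage of reference_reduction_numpy (a sketch, not executed as part of this module):
#   ref_sum = reference_reduction_numpy(np.sum)
#   x = np.ones((2, 3))
#   ref_sum(x, dim=1, keepdim=True)   # forwarded as np.sum(x, axis=1, keepdims=True)
#                                     # expected shape (2, 1), filled with 3.0
#   ref_sum(x, dim=(0, 1))            # forwarded as np.sum(x, axis=(0, 1)) -> 6.0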
def loss_reference_reduction_wrapper(fn):
def wrapper(input, target, *, size_average=None, reduce=None, reduction="mean", **other_kwargs):
if size_average is not None or reduce is not None:
raise RuntimeError(
"The keyword arguments 'size_average' and 'reduce' are deprecated and not supported by this wrapper"
)
output = fn(input, target, **other_kwargs)
if reduction == "mean":
return np.mean(output)
elif reduction == "sum":
return np.sum(output)
else: # reduction == "none"
return output
return wrapper
@loss_reference_reduction_wrapper
def reference_smooth_l1_loss(input, target, beta=1.0):
diff = input - target
abs_diff = np.abs(diff)
above_threshold = abs_diff >= beta
loss = np.empty_like(input)
loss[above_threshold] = abs_diff[above_threshold] - 0.5 * beta
loss[~above_threshold] = diff[~above_threshold] ** 2 / (2 * beta)
return loss
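# Worked example for reference_smooth_l1_loss (illustrative, not executed here): with beta=1.0,
#   input  = np.array([0.0, 2.0])
#   target = np.zeros(2)
#   per-element loss: [0.0 ** 2 / 2, 2.0 - 0.5] == [0.0, 1.5]
#   reference_smooth_l1_loss(input, target)                    # mean reduction -> 0.75
#   reference_smooth_l1_loss(input, target, reduction="none")  # -> [0.0, 1.5]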
def reference_std_var(f):
"""Forwards unbiased/correction kwargs as NumPy's equivalent ddof"""
g = reference_reduction_numpy(f)
@wraps(g)
def wrapper(x: np.ndarray, *args, **kwargs):
assert not ('unbiased' in kwargs and 'correction' in kwargs)
if 'unbiased' in kwargs:
kwargs['ddof'] = int(kwargs.pop('unbiased'))
elif 'correction' in kwargs:
kwargs['ddof'] = kwargs.pop('correction')
return g(x, *args, **kwargs)
return wrapper
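# Illustrative usage of reference_std_var (a sketch, not executed as part of this module):
#   ref_var = reference_std_var(np.var)
#   x = np.array([1.0, 2.0, 3.0, 4.0])
#   ref_var(x, unbiased=True)   # forwarded as np.var(x, ddof=1) -> 5/3
#   ref_var(x, correction=0)    # forwarded as np.var(x, ddof=0) -> 1.25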
def generate_std_var_kwargs(t: torch.Tensor, **kwargs):
"""Generates unbiased/correction kwargs for std/var operators"""
yield ((), {'unbiased': True})
yield ((), {'unbiased': False})
# Currently, calling std with correction is only enabled when
# both dim and keepdim are provided.
if 'dim' in kwargs and 'keepdim' in kwargs:
yield ((), {'correction': 0})
yield ((), {'correction': 1})
numel = torch.tensor(t.shape)[kwargs.get('dim')].prod()
yield ((), {'correction': numel // 2})
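# Illustrative output of generate_std_var_kwargs (a sketch, not executed here): for
#   t = torch.ones(4, 5)
#   list(generate_std_var_kwargs(t, dim=1, keepdim=False))
# the generator yields ((), {'unbiased': True}) and ((), {'unbiased': False}), and, because both
# dim and keepdim were provided, also ((), {'correction': 0}), ((), {'correction': 1}), and
# ((), {'correction': numel // 2}), where numel is the size of the reduced dimension (5 here).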
def error_inputs_mean(op_info, device, **kwargs):
err_msg1 = (r"mean\(\): could not infer output dtype. "
r"Input dtype must be either a floating point or complex dtype. "
r"Got: Long")
si1 = SampleInput(
make_tensor((3, 4, 5), dtype=torch.int64, device=device),
args=([],))
err_msg2 = (r"mean\(\): could not infer output dtype. "
r"Optional dtype must be either a floating point or complex dtype. "
r"Got: Long")
si2 = SampleInput(
make_tensor((3, 4, 5), dtype=torch.float32, device=device),
args=([],),
kwargs={"dtype": torch.int64})
err_msg3 = "Expected out tensor to have dtype double, but got float instead"
si3 = SampleInput(
make_tensor((3, 4, 5), dtype=torch.int64, device=device),
args=([],),
kwargs={
"dtype": torch.float64,
"out": make_tensor([], dtype=torch.float32, device=device),
})
return (ErrorInput(si1, error_regex=err_msg1),
ErrorInput(si2, error_regex=err_msg2),
ErrorInput(si3, error_regex=err_msg3))
# NumPy implementation of torch.flatten.
# Unfortunately there is no np.flatten, so we figure out the desired shape and call np.reshape.
def reference_flatten(input, start_dim=0, end_dim=-1):
in_shape = input.shape
in_rank = len(in_shape)
for d in start_dim, end_dim:
if not ((in_rank == 0 and d in (-1, 0)) or -in_rank <= d < in_rank):
raise IndexError(f"Dimension out of range (expected to be in range of [{-in_rank}, {in_rank-1}], but got {d})")
end_dim = end_dim if end_dim >= 0 else in_rank + end_dim
start_dim = start_dim if start_dim >= 0 else in_rank + start_dim
if in_rank == 0:
end_dim = start_dim
if end_dim < start_dim:
raise RuntimeError("flatten() has invalid args: start_dim cannot come after end_dim")
flatten_bit_dim = functools.reduce(operator.mul, in_shape[start_dim:end_dim + 1], 1)
out_shape = in_shape[:start_dim] + (flatten_bit_dim,) + in_shape[end_dim + 1:]
return np.reshape(input, out_shape)
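# Illustrative usage of reference_flatten (a sketch, not executed as part of this module):
#   x = np.arange(24).reshape(2, 3, 4)
#   reference_flatten(x)                          # expected shape (24,)
#   reference_flatten(x, start_dim=1)             # expected shape (2, 12)
#   reference_flatten(x, start_dim=0, end_dim=1)  # expected shape (6, 4)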
# Operator database (sorted alphabetically)
op_db: List[OpInfo] = [
UnaryUfuncInfo('abs',
aliases=('absolute', ),
ref=np.abs,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
skips=(
DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestGradients',
'test_inplace_grad', dtypes=(torch.cdouble,)),
DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestGradients',
'test_inplace_gradgrad', dtypes=(torch.cdouble,)),
DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestGradients',
'test_inplace_forward_mode_AD', dtypes=(torch.cdouble,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat]),
# Reference: https://github.com/pytorch/pytorch/issues/49224
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
# TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)
# It can break the logic of the loop over all possible types, but that is OK.
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes',
dtypes=[torch.cfloat, torch.cdouble]),
),
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True),
# NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
UnaryUfuncInfo('acos',
aliases=('arccos', ),
ref=np.arccos,
domain=(-1, 1),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-1,
torch.complex64: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
# Failing with wrong imaginary sign on at least some Windows jobs
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Failing with wrong imaginary sign on at least some Windows jobs
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),)),
# NOTE: the derivative for inplace acosh is not implemented
UnaryUfuncInfo('acosh',
aliases=('arccosh', ),
ref=np.arccosh,
domain=(1, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Failing with wrong imaginary sign on at least some Windows jobs
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
),
# acosh is not defined at x < 1 (real)
reference_numerics_filter=NumericsFilter(
condition=lambda x: (x < 1 if not x.is_complex() else torch.zeros_like(x, dtype=torch.bool)),
safe_val=2)),
BinaryUfuncInfo('add',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \
else np.add(input, np.multiply(alpha, other)),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16,
torch.float16, torch.chalf),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_add_sub,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
supports_two_python_scalars=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
),
skips=(
# boolean alpha not handled properly
DecorateInfo(unittest.expectedFailure,
'TestCudaFuserOpInfo',
'test_nvfuser_correctness',
dtypes=(torch.bool,)),
# boolean alpha not handled properly
DecorateInfo(unittest.expectedFailure,
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=(torch.bool,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestCommon',
'test_numpy_refs',
dtypes=(torch.complex128,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('arange',
dtypes=all_types_and(torch.bfloat16, torch.float16),
supports_out=True,
supports_autograd=False,
is_factory_function=True,
error_inputs_func=error_inputs_arange,
sample_inputs_func=sample_inputs_arange,
skips=(
# https://github.com/pytorch/pytorch/issues/81774
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Lazy tensor failures
DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'),
DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'),
DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'),
# Exception raised from analyzeImpl at ../torch/csrc/jit/ir/alias_analysis.cpp:608
# We don't have an op for aten::arange but it isn't a special case.
# Argument types: bool, bool, bool, int, int, Device, boo
DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'),
# Captured graph does not contain aten::arange (succeeds on complex!)
# g: graph():
# %25 : Long(1, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value={1}]()
# return (%25)
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
BinaryUfuncInfo('clamp_max',
ref=_clamp_max_numpy,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_rhs_python_scalar=False,
supports_fwgrad_bwgrad=True,
rhs_make_tensor_kwargs=dict(exclude_zero=False),
skips=(
# RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
# dispatch to lazy test failed
DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'),
# test error disabled since rhs non-tensor python scalar is supported
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'),
)),
BinaryUfuncInfo('clamp_min',
ref=_clamp_min_numpy,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_rhs_python_scalar=False,
supports_fwgrad_bwgrad=True,
rhs_make_tensor_kwargs=dict(exclude_zero=False),
skips=(
# RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
# dispatch to lazy test failed
DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'),
# test error disabled since rhs non-tensor python scalar is supported
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'),
)),
BinaryUfuncInfo('mul',
aliases=('multiply',),
dtypes=all_types_and_complex_and(torch.chalf, torch.float16, torch.bfloat16, torch.bool),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True),
BinaryUfuncInfo('sub',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)),
aliases=('subtract',),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_add_sub,
supports_two_python_scalars=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}),
'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}),
'TestDecomp', 'test_comprehensive', device_type='cpu'),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}),
'TestDecomp', 'test_quick', device_type='cpu'),
),
skips=(
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.uint8,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
)),
OpInfo('addmm',
# This addmm OpInfo is for when alpha and beta are not both equal to 1.
# alpha=beta=1 is tested in the following opinfo, because that special case will
# trigger addmm being decomposed by a jit pass.
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_addmm,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('addmm',
# When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.
variant_test_name='decomposed',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],
sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1),
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
# https://github.com/pytorch/pytorch/issues/71784
DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
device_type='cpu', dtypes=(torch.float16,)),
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.float16,)),
)),
OpInfo('addmv',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_addmv),
OpInfo('addbmm',
ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M),
np.multiply(np.asarray(alpha, dtype=batch1.dtype),
np.sum(np.matmul(batch1, batch2), axis=0))),
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16]
if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05),
torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_numpy_refs')],
skips=(
# NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
# addbmm does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# https://github.com/pytorch/pytorch/issues/55907
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addbmm),
OpInfo('baddbmm',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater or TEST_WITH_ROCM else []),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else [],
torch.complex64, torch.complex128),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view', device_type='cuda')],
sample_inputs_func=sample_inputs_baddbmm,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('dot',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('vdot',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('bmm',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16]
if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
),
sample_inputs_func=sample_inputs_bmm),
OpInfo('mv',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_mv),
OpInfo('addr',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, *[torch.bfloat16]
if (CUDA11OrLater or TEST_WITH_ROCM) else []),
# Reference: https://github.com/pytorch/pytorch/issues/50747
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/50747
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),
),
sample_inputs_func=sample_inputs_addr,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('addcmul',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
# 76047
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness',
dtypes=(torch.int8, torch.int16, torch.int32, torch.int64)),
),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
OpInfo('addcdiv',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
DecorateInfo(unittest.expectedFailure,
'TestCommon',
'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
UnaryUfuncInfo('asin',
aliases=('arcsin', ),
ref=np.arcsin,
domain=(-1, 1),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}),
'TestUnaryUfuncs', device_type='cuda'),
precisionOverride({torch.bfloat16: 1e-2}),
],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
# NOTE: derivative for inplace asinh is not implemented
UnaryUfuncInfo('asinh',
aliases=('arcsinh', ),
ref=np.arcsinh,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('atan',
aliases=('arctan', ),
ref=np.arctan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
active_if=TEST_WITH_ROCM, device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
BinaryUfuncInfo('atan2',
aliases=('arctan2',),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
supports_rhs_python_scalar=False,
skips=(
# Incorrectly attempts to use a scalar for the second argument
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
)),
UnaryUfuncInfo('atanh',
aliases=('arctanh', ),
ref=np.arctanh,
domain=(-1, 1),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.cfloat],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]),
)),
OpInfo('allclose',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
ref=np.allclose,
supports_autograd=False,
supports_forward_ad=False,
sample_inputs_func=sample_inputs_allclose,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
),
supports_out=False),
OpInfo('broadcast_to',
ref=np.broadcast_to,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_broadcast_to),
OpInfo('broadcast_shapes',
op=torch.broadcast_shapes,
ref=np.broadcast_shapes if np.lib.NumpyVersion(np.__version__) >= '1.20.0' else None,
dtypes=_dispatch_dtypes((torch.float32,)),
supports_out=False,
supports_gradgrad=False,
assert_autodiffed=False,
supports_autograd=False,
supports_scripting=False,
sample_inputs_func=sample_inputs_broadcast_shapes,
skips=(
# https://github.com/pytorch/pytorch/issues/64997
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# skip dtype tests since broadcast_shapes is not device dependent.
# having dtypes limited to torch.float32 would cause test_dtypes to report unexpected success
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'),
# skip these tests since we have non tensor input
DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('broadcast_tensors',
ref=np.broadcast_arrays,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_broadcast_tensors,
reference_inputs_func=reference_inputs_broadcast_tensors,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
# https://github.com/pytorch/pytorch/issues/64997
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
)),
OpInfo('block_diag',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# Default batching rule in core doesn't work for ops with TensorList args
check_batched_forward_grad=False,
skips=(
# https://github.com/pytorch/pytorch/issues/64997
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_block_diag),
UnaryUfuncInfo('bitwise_not',
ref=np.bitwise_not,
dtypes=integral_types_and(torch.bool),
operator_variant=operator.invert,
supports_autograd=False),
BinaryUfuncInfo('bitwise_left_shift',
op=torch.bitwise_left_shift,
dtypes=integral_types(),
dtypesIfCUDA=integral_types(),
operator_variant=operator.lshift,
inplace_operator_variant=operator.ilshift,
supports_autograd=False,
supports_one_python_scalar=True,
rhs_make_tensor_kwargs=dict(low=0),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
)),
BinaryUfuncInfo('bitwise_right_shift',
op=torch.bitwise_right_shift,
dtypes=integral_types(),
dtypesIfCUDA=integral_types(),
operator_variant=operator.rshift,
inplace_operator_variant=operator.irshift,
supports_autograd=False,
supports_one_python_scalar=True,
rhs_make_tensor_kwargs=dict(low=0),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
)),
OpInfo('combinations',
op=torch.combinations,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
supports_out=False,
sample_inputs_func=sample_inputs_combinations),
OpInfo('cartesian_prod',
op=torch.cartesian_prod,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_cartesian_prod,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.expectedFailure,
'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)),
OpInfo('cdist',
dtypes=floating_types(),
supports_out=False,
supports_gradgrad=False,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_cdist),
UnaryUfuncInfo('ceil',
ref=np.ceil,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True),
OpInfo('cholesky',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],),
OpInfo('cholesky_inverse',
dtypes=floating_and_complex_types(),
backward_dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
check_batched_gradgrad=True,
sample_inputs_func=sample_inputs_linalg_cholesky_inverse,
gradcheck_wrapper=gradcheck_wrapper_triangular_input_real_positive_diagonal,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# Strides are not the same! Original strides were ((4, 2, 1),) and strides are now ((4, 1, 2),)
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),)),
OpInfo('cholesky_solve',
op=torch.cholesky_solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_cholesky_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]),
OpInfo('chunk',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
sample_inputs_func=sample_inputs_chunk,
reference_inputs_func=reference_inputs_chunk,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('clone',
ref=np.copy,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
sample_inputs_func=sample_inputs_clone_contiguous,
reference_inputs_func=reference_inputs_clone_contiguous,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
skips=(
# TypeError: _copy_dispatcher() got an unexpected keyword argument 'memory_format'
# (NumPy reference needs to be extended with memory_format)
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref'),
),),
OpInfo('contiguous',
op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
sample_inputs_func=sample_inputs_clone_contiguous,
reference_inputs_func=reference_inputs_clone_contiguous,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_fusible_nodes=['aten::contiguous'],
assert_jit_shape_analysis=True,
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
OpInfo('sum_to_size',
op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_sum_to_size,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)),),),
OpInfo('symeig',
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_symeig,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off]),
OpInfo('clamp',
aliases=('clip',),
ref=_clamp_numpy,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_clamp,
reference_inputs_func=partial(reference_inputs_elementwise_ternary, sample_inputs_func=sample_inputs_clamp),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# nvFuser and NNC appear to not handle boolean clamp
DecorateInfo(unittest.expectedFailure,
'TestCudaFuserOpInfo',
'test_nvfuser_correctness',
dtypes=(torch.bool,)),
DecorateInfo(unittest.expectedFailure,
'TestNNCOpInfo',
'test_nnc_correctness',
dtypes=(torch.bool,)),
)),
UnaryUfuncInfo('positive',
ref=np.positive,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
UnaryUfuncInfo('conj',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16,
torch.half, torch.chalf),
supports_sparse=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
supports_out=False),
UnaryUfuncInfo('conj_physical',
decomp_aten_name='_conj_physical',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16,
torch.half, torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
# RuntimeError: inputSet && outputSet
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),
DecorateInfo(unittest.skip("Skipped! conj_physical_ not implemented for sparse"),
'TestSparseUnaryUfuncs', 'test_inplace'),
)),
OpInfo('resolve_conj',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
),
OpInfo('resolve_neg',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
),
OpInfo('view_as_real',
dtypes=complex_types(),
supports_forward_ad=True,
supports_out=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_view_as_real,
test_conjugated_samples=False,
),
OpInfo('view_as_complex',
dtypes=floating_types_and(torch.half),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
test_neg_view=False,
sample_inputs_func=sample_inputs_view_as_complex,
skips=(
# RuntimeError: Tensor must have a last dimension with stride 1
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"),
# RuntimeError: "eq_cpu" not implemented for 'ComplexHalf'
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.half,)),
# RuntimeError: "eq_cpu" not implemented for 'ComplexHalf'
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.half,)),
)),
BinaryUfuncInfo('complex',
dtypes=floating_types_and(torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_rhs_python_scalar=False,
skips=(
# Test doesn't account for complex's type promotion semantics
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps'),
)),
BinaryUfuncInfo('copysign',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
promotes_int_to_float=True,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
OpInfo('corrcoef',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
sample_inputs_func=sample_inputs_corrcoef,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
),
supports_out=False),
UnaryUfuncInfo('cos',
ref=np.cos,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS),
# This fails on CUDA but passes on ROCm
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cdouble,), device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed)
# Greatest relative difference: nan at index (700,) (up to 0.001 allowed)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda',
dtypes=(torch.chalf,), active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('cosh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed)
# Greatest relative difference: nan at index (6000,) (up to 0.001 allowed)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda',
dtypes=(torch.chalf,), active_if=IS_WINDOWS),
)),
OpInfo('cov',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16]
if (CUDA11OrLater or TEST_WITH_ROCM) else []),
sample_inputs_func=sample_inputs_cov,
error_inputs_func=error_inputs_cov,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
# Float did not match double
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
# Jacobian mismatch
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.skip("Barely fails"), 'TestGradients', 'test_fn_fwgrad_bwgrad'),
# JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0):
# return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950
# ~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
)),
OpInfo('cross',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half),
sample_inputs_func=sample_inputs_cross,
supports_fwgrad_bwgrad=True,
supports_out=True,
supports_forward_ad=True),
OpInfo('linalg.cross',
ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim),
op=torch.linalg.cross,
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half),
aten_name='linalg_cross',
sample_inputs_func=sample_inputs_cross,
supports_out=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True),
OpInfo('cumsum',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# cumsum does not handle correctly out= dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_cumulative_ops),
OpInfo('cumprod',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# cumprod does not handle correctly out= dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
),
# gradgradcheck fails in fast_mode=True: #56275
sample_inputs_func=sample_inputs_cumprod,
gradcheck_fast_mode=False),
OpInfo('cummax',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('cummin',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
UnaryUfuncInfo('deg2rad',
ref=np.radians,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.bfloat16]),
)),
OpInfo('diff',
op=torch.diff,
# np.diff has np._NoValue as the default value for prepend and append; compare_with_reference breaks if prepend/append
# are set to None when converting to numpy
ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: (
np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append)
),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diff,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='no_rounding_mode',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
promotes_int_to_float=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True),),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='trunc_rounding',
dtypes=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="trunc")),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
promotes_int_to_float=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
skips=(
# RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'),
)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='floor_rounding',
dtypes=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="floor")),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
promotes_int_to_float=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
skips=(
# RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'),
)),
BinaryUfuncInfo('true_divide',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_forward_ad=True,
promotes_int_to_float=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
OpInfo('equal',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=lambda input, other: (input == other).all(),
sample_inputs_func=sample_inputs_equal,
supports_autograd=False,
supports_tracing=False,
skips=(
)),
UnaryUfuncInfo('exp',
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.bfloat16, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/issues/48010
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
OpInfo('expand',
op=lambda self, shape: self.expand(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_expand,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
OpInfo('expand_as',
op=lambda self, other: self.expand_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_expand_as,
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),),
),
OpInfo('diag',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diag,
error_inputs_func=error_inputs_diag),
OpInfo('diag_embed',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('diagonal',
# These are not strictly aliases as they have diverging defaults, but we can treat them as aliases for testing purposes.
# If we add tests that compare the function against the alias, make linalg.diagonal its own OpInfo.
aliases=('linalg.diagonal',),
aten_backward_name='diagonal_backward',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('diagonal_scatter',
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_diagonal_scatter),
BinaryUfuncInfo('eq',
ref=np.equal,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
always_returns_bool=True,
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops,
skips=(
)),
BinaryUfuncInfo('fmax',
op=torch.fmax,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
)),
BinaryUfuncInfo('fmin',
op=torch.fmin,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
)),
BinaryUfuncInfo('fmod',
ref=np.fmod,
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=None,
rhs_make_tensor_kwargs={'exclude_zero': True},
decorators=(
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_contig_vs_every_other',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_non_contig',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
)),
BinaryUfuncInfo('remainder',
ref=np.remainder,
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=None,
operator_variant=operator.mod,
inplace_operator_variant=operator.imod,
supports_one_python_scalar=True,
rhs_make_tensor_kwargs={'exclude_zero': True},
decorators=(
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_contig_vs_every_other',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_non_contig',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
# Fails on XLA
# False is not true : Tensors failed to compare as equal!
# Attempted to compare equality of tensors with different dtypes
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
)),
UnaryUfuncInfo('frac',
ref=lambda x: np.modf(x)[0],
dtypes=floating_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)),
# See https://github.com/pytorch/pytorch/issues/76047
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness',
dtypes=(torch.float32, torch.float64)),
)),
SpectralFuncInfo('fft.fft',
aten_name='fft_fft',
decomp_aten_name='_fft_c2c',
ref=np.fft.fft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
),
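# NB: the starred conditional in dtypesIfCUDA above (and in the FFT entries below), e.g.
#   *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)
# unpacks an empty tuple when half-precision FFT is unavailable (ROCm, or CUDA archs older
# than SM53) and unpacks the half/complex-half dtypes otherwise, keeping the dtype list a
# single expression evaluated at import time.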
SpectralFuncInfo('fft.fft2',
aten_name='fft_fft2',
ref=np.fft.fft2,
decomp_aten_name='_fft_c2c',
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[precisionOverride(
{torch.float: 1e-4, torch.cfloat: 1e-4})],
),
SpectralFuncInfo('fft.fftn',
aten_name='fft_fftn',
decomp_aten_name='_fft_c2c',
ref=np.fft.fftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[precisionOverride(
{torch.float: 1e-4, torch.cfloat: 1e-4})]),
SpectralFuncInfo('fft.hfft',
aten_name='fft_hfft',
decomp_aten_name='_fft_c2r',
ref=np.fft.hfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
check_batched_gradgrad=False,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)
),
)),
SpectralFuncInfo('fft.hfft2',
aten_name='fft_hfft2',
decomp_aten_name='_fft_c2r',
ref=scipy.fft.hfft2 if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
'TestFFT', 'test_reference_nd')],
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness'
),
)),
SpectralFuncInfo('fft.hfftn',
aten_name='fft_hfftn',
decomp_aten_name='_fft_c2r',
ref=scipy.fft.hfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
'TestFFT', 'test_reference_nd'), ],
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness'
),
)),
SpectralFuncInfo('fft.rfft',
aten_name='fft_rfft',
decomp_aten_name='_fft_r2c',
ref=np.fft.rfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
skips=(
),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.rfft2',
aten_name='fft_rfft2',
decomp_aten_name='_fft_r2c',
ref=np.fft.rfft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
precisionOverride({torch.float: 1e-4}),
],),
SpectralFuncInfo('fft.rfftn',
aten_name='fft_rfftn',
decomp_aten_name='_fft_r2c',
ref=np.fft.rfftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
precisionOverride({torch.float: 1e-4}),
],),
SpectralFuncInfo('fft.ifft',
aten_name='fft_ifft',
decomp_aten_name='_fft_c2c',
ref=np.fft.ifft,
ndimensional=SpectralFuncType.OneD,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),),
SpectralFuncInfo('fft.ifft2',
aten_name='fft_ifft2',
decomp_aten_name='_fft_c2c',
ref=np.fft.ifft2,
ndimensional=SpectralFuncType.TwoD,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ifftn',
aten_name='fft_ifftn',
decomp_aten_name='_fft_c2c',
ref=np.fft.ifftn,
ndimensional=SpectralFuncType.ND,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ihfft',
aten_name='fft_ihfft',
decomp_aten_name='_fft_r2c',
ref=np.fft.ihfft,
ndimensional=SpectralFuncType.OneD,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)),
skips=(
),
check_batched_grad=False),
SpectralFuncInfo('fft.ihfft2',
aten_name='fft_ihfft2',
decomp_aten_name='_fft_r2c',
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=(
# The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]).
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
DecorateInfo(precisionOverride({torch.float: 2e-4}), 'TestFFT', 'test_reference_nd'),
# Mismatched elements!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'))),
SpectralFuncInfo('fft.ihfftn',
aten_name='fft_ihfftn',
decomp_aten_name='_fft_r2c',
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
# The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]).
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Mismatched elements!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(
precisionOverride({torch.float: 2e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.irfft',
aten_name='fft_irfft',
decomp_aten_name='_fft_c2r',
ref=np.fft.irfft,
ndimensional=SpectralFuncType.OneD,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.irfft2',
aten_name='fft_irfft2',
decomp_aten_name='_fft_c2r',
ref=np.fft.irfft2,
ndimensional=SpectralFuncType.TwoD,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.irfftn',
aten_name='fft_irfftn',
decomp_aten_name='_fft_c2r',
ref=np.fft.irfftn,
ndimensional=SpectralFuncType.ND,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool),
# rocFFT doesn't support Half/Complex Half Precision FFT
# CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
dtypesIfCUDA=all_types_and_complex_and(
torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
OpInfo('fft.fftshift',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
sample_inputs_func=sample_inputs_fftshift,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo('fft.ifftshift',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
sample_inputs_func=sample_inputs_fftshift,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo('stft',
decorators=[
skipCPUIfNoFFT,
DecorateInfo(unittest.skip("Skipped! stft does not match the native function"),
'TestJit', 'test_variant_consistency_jit'),
],
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_stft,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
),
OpInfo('istft',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_istft,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
decorators=(
DecorateInfo(unittest.skip("Skipped! istft does not match the native function"),
'TestJit', 'test_variant_consistency_jit'),
),
skips=(
skipCPUIfNoFFT,
# gradcheck fails on ROCm (gh-68429)
# grad is computed improperly (probably for weights tensor)
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
)),
UnaryUfuncInfo('floor',
ref=np.floor,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True),
OpInfo('flip',
op=torch.flip,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_flip,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('fliplr',
op=torch.fliplr,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
error_inputs_func=error_inputs_fliplr,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('flipud',
op=torch.flipud,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
error_inputs_func=error_inputs_flipud,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('sparse.sampled_addmm',
dtypes=floating_and_complex_types(),
supports_autograd=True,
sample_inputs_func=sample_inputs_sparse_sampled_addmm,
decorators=[
skipCUDAIf(_get_torch_cuda_version() < (11, 3), "cusparseSDDMM was added in 11.2.1"),
skipCPUIfNoMklSparse, ],
skips=(
# NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# RuntimeError: Sparse CSR tensors do not have strides.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'),
# RuntimeError: sampled_addmm: Expected result to have sparse csr layout, but got Strided
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# RuntimeError: Sparse CSR tensors do not have strides
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: unsupported memory format option Preserve
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad'),
# GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
# GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
# GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'),
)),
UnaryUfuncInfo('i0',
ref=np_unary_ufunc_integer_promotion_wrapper(
scipy.special.i0) if TEST_SCIPY else None,
aliases=('special.i0',),
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
backward_dtypes=floating_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_i0_i1,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.int8,)),
)),
UnaryUfuncInfo('special.i0e',
aten_name='special_i0e',
ref=scipy.special.i0e if TEST_SCIPY else None,
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 3e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
backward_dtypes=floating_types(),
sample_inputs_func=sample_inputs_i0_i1,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
UnaryUfuncInfo('special.i1',
aten_name='special_i1',
ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
decorators=(
DecorateInfo(toleranceOverride({
torch.float32: tol(atol=1e-4, rtol=0),
torch.bool: tol(atol=1e-4, rtol=0)})),
),
skips=(
DecorateInfo(unittest.skip("Incorrect result!"),
'TestUnaryUfuncs',
'test_reference_numerics_large',
dtypes=(torch.int8,)),
),
supports_fwgrad_bwgrad=True,
supports_forward_ad=True),
UnaryUfuncInfo('special.i1e',
aten_name='special_i1e',
ref=scipy.special.i1e if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
UnaryUfuncInfo('special.ndtr',
aten_name='special_ndtr',
decorators=(precisionOverride({torch.bfloat16: 5e-3,
torch.float16: 5e-4}),),
ref=scipy.special.ndtr if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Dispatch stub: unsupported device typemeta
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', device_type='meta'),
)),
BinaryUfuncInfo('floor_divide',
ref=_floor_divide_np,
dtypes=all_types_and(torch.half, torch.bfloat16),
supports_autograd=False,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
supports_two_python_scalars=True,
skips=(
# AssertionError: Results of original model and exported/imported version of model differed
DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
# bfloat16 floor_divide compared with a float32 reference works inconsistently
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs',
dtypes=(torch.bfloat16,)),
# int8 floor divide has different results for -128 // -1 vs. NumPy
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
dtypes=(torch.int8,)),
# The following test fails on some jobs
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values',
dtypes=(torch.float16,)),
)),
UnaryUfuncInfo('frexp',
op=torch.frexp,
ref=np.frexp,
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
# skip testing torch.frexp as it is not supported by ROCm platform yet
decorators=[],
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# skips the tests below as torch.frexp returns tuple-like (mantissa, exponent) outputs,
# while these tests currently require the output to be a single tensor.
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
# skips test_reference_numerics due to an error in Windows CI:
# np.frexp returns the exponent as np.intc dtype on Windows,
# and np.intc does not have a corresponding torch dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=IS_WINDOWS),
)),
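# NB: torch.frexp returns a (mantissa, exponent) pair with input == mantissa * 2 ** exponent,
# which is why the single-output unary-ufunc tests above are skipped for it.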
BinaryUfuncInfo('ge',
ref=np.greater_equal,
aliases=('greater_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
OpInfo('geqrf',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_qr_geqrf,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
supports_autograd=False,
skips=(
# FIXME: geqrf can't forward with complex inputs that require grad
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
BinaryUfuncInfo('gt',
ref=np.greater,
aliases=('greater',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
UnaryUfuncInfo('imag',
ref=np.imag,
dtypes=complex_types_and(torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/issues/66357
# RuntimeError: view_as_real doesn't work on unresolved conjugated tensors.
check_batched_forward_grad=False,
skips=(
# Skip since real and imag don't have out variants.
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('gradient',
dtypes=floating_and_complex_types_and(torch.int8, torch.int16,
torch.int32, torch.int64,
torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# following tests give a runtime error with undefined value tensor
# see discussion : https://github.com/pytorch/pytorch/issues/56660
# RuntimeError:
# Arguments for call are not valid.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
),
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_gradient,
error_inputs_func=error_inputs_gradient),
OpInfo('inverse',
op=torch.inverse,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', '.test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', '.test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('isin',
dtypes=all_types(),
dtypesIfCUDA=all_types_and(torch.half),
supports_autograd=False,
sample_inputs_func=sample_inputs_isin),
OpInfo('kthvalue',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_kthvalue,
error_inputs_func=error_inputs_kthvalue),
BinaryUfuncInfo('le',
ref=np.less_equal,
aliases=('less_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
OpInfo('linalg.det',
op=torch.linalg.det,
aliases=('det',),
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack,
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}))],
check_batched_gradgrad=False,
supports_inplace_autograd=False),
OpInfo('linalg.det',
op=torch.linalg.det,
variant_test_name='singular',
aliases=('det',),
dtypes=double_types(),
backward_dtypes=double_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det_singular,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack,
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}))],
check_batched_gradgrad=False,
supports_inplace_autograd=False,
skips=(
DecorateInfo(unittest.skip("Skipped!"), "TestGradients", 'test_fn_fwgrad_bwgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
# dtypes are tested in the suite above, no need to repeat it for singular
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
)),
OpInfo('linalg.cholesky',
aten_name='linalg_cholesky',
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],),
OpInfo('linalg.cholesky_ex',
aten_name='linalg_cholesky_ex',
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
),
OpInfo('linalg.vecdot',
aten_name='linalg_vecdot',
ref=lambda x, y, *, dim=-1: (x.conj() * y).sum(dim),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
sample_inputs_func=sample_inputs_linalg_vecdot,
check_batched_forward_grad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('linalg.cond',
aten_name='linalg_cond',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_cond,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],),
OpInfo('linalg.eig',
aten_name='linalg_eig',
op=torch.linalg.eig,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_eig,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# AssertionError: Scalars are not equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off],
),
OpInfo('linalg.eigvals',
aten_name='linalg_eigvals',
op=torch.linalg.eigvals,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
# exits early on eager extremal value test
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('linalg.eigh',
aten_name='linalg_eigh',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('linalg.eigvalsh',
aten_name='linalg_eigvalsh',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('linalg.householder_product',
aten_name='linalg_householder_product',
op=torch.linalg.householder_product,
aliases=('orgqr', ),
dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
# TODO: backward uses in-place operations that vmap doesn't like
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_householder_product,
decorators=[
skipCUDAIfNoCusolver, skipCPUIfNoLapack,
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)})),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
]),
OpInfo('linalg.ldl_factor',
aten_name='linalg_ldl_factor',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_ldl_factor,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],
),
OpInfo('linalg.ldl_factor_ex',
aten_name='linalg_ldl_factor_ex',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_ldl_factor,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],
),
OpInfo('linalg.ldl_solve',
aten_name='linalg_ldl_solve',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_ldl_solve,
decorators=[
skipCUDAIf(_get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1"),
skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('linalg.lstsq',
aten_name='linalg_lstsq',
dtypes=floating_and_complex_types(),
supports_out=True,
sample_inputs_func=sample_inputs_linalg_lstsq,
error_inputs_func=error_inputs_lstsq,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# we skip gradient checks for this suite as they are tested in
# variant_test_name='grad_oriented'
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
# The values for attribute 'shape' do not match
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('linalg.lstsq',
aten_name='linalg_lstsq',
variant_test_name='grad_oriented',
# gradcheck for forward AD fails with multi-Tensor outputs
op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0],
supports_out=False,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_lstsq,
error_inputs_func=error_inputs_lstsq_grad_oriented,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# tests do not work with passing lambda for op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('linalg.matrix_power',
aliases=('matrix_power',),
aten_name='linalg_matrix_power',
dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
sample_inputs_func=sample_inputs_linalg_matrix_power,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
OpInfo('linalg.multi_dot',
# Need this lambda because gradcheck does not work with TensorList inputs
aten_name='linalg_multi_dot',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
supports_inplace_autograd=False,
# Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_multi_dot,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples'),
# Fails on XLA.
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
# https://github.com/pytorch/pytorch/issues/71774
DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
device_type='cpu', dtypes=(torch.long,)),
)),
# NB: linalg.norm has two variants so that different skips can be used for different sample inputs
OpInfo('linalg.norm',
aten_name='linalg_norm',
op=torch.linalg.norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
sample_inputs_func=sample_inputs_linalg_norm,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True),
OpInfo('linalg.norm',
op=torch.linalg.norm,
variant_test_name='subgradients_at_zero',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
sample_inputs_func=partial(sample_inputs_linalg_norm, variant='subgradient_at_zero'),
aten_name='linalg_norm',
supports_forward_ad=True,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got:
# Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
skips=(
# [NEW] Skips specifically for sample inputs at zero
# norm's vjp/jvp are not well-conditioned near zero
DecorateInfo(unittest.expectedFailure, "TestGradients", 'test_fn_gradgrad'),
DecorateInfo(unittest.expectedFailure, "TestGradients", 'test_fn_fwgrad_bwgrad')
)),
OpInfo('linalg.matrix_norm',
aten_name='linalg_matrix_norm',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
check_batched_forward_grad=False,
check_batched_gradgrad=False,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
sample_inputs_func=sample_inputs_linalg_matrix_norm),
OpInfo('linalg.qr',
aten_name='linalg_qr',
op=torch.linalg.qr,
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# In-place ops
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_qr_geqrf,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),
OpInfo('linalg.slogdet',
aten_name='linalg_slogdet',
op=torch.linalg.slogdet,
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]),
OpInfo('linalg.vander',
aten_name='linalg_vander',
ref=np_vander_batched,
op=torch.linalg.vander,
dtypes=all_types_and_complex(),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
skips=(
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
),
sample_inputs_func=sample_inputs_linalg_vander),
ReductionOpInfo(
'linalg.vector_norm',
op=torch.linalg.vector_norm,
identity=0,
nan_policy='propagate',
supports_multiple_dims=True,
complex_to_real=True,
supports_forward_ad=True,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients
# got: Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
generate_args_kwargs=sample_kwargs_vector_norm,
aten_name='linalg_vector_norm',
skips=(
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
)),
OpInfo('linspace',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
is_factory_function=True,
supports_out=True,
supports_autograd=False,
error_inputs_func=error_inputs_linspace,
sample_inputs_func=sample_inputs_linspace,
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Same failure as arange: cannot find linspace in captured graph
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# cpu implementation is wrong on some integral types
# https://github.com/pytorch/pytorch/issues/81996
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cpu"),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cpu"),
# cuda implementation is off-by-one on some inputs due to precision issues
# https://github.com/pytorch/pytorch/issues/82230
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
# UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API
# in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64!
# Caching allocator allocated memory was 0 and is now reported as 307200 on device 0.
# CUDA driver allocated memory was 1254555648 and is now 1242955776.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.cfloat,), device_type="cuda"),
)),
OpInfo('logspace',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
is_factory_function=True,
supports_out=True,
supports_autograd=False,
error_inputs_func=error_inputs_linspace,
sample_inputs_func=sample_inputs_logpace,
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Same failure as arange: cannot find logspace in captured graph
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Off-by-one issue when casting floats to ints
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"),
# UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API
# in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64!
# Caching allocator allocated memory was 0 and is now reported as 307200 on device 0.
# CUDA driver allocated memory was 1254555648 and is now 1242955776.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
dtypes=(torch.cfloat,), device_type="cuda"),
)),
UnaryUfuncInfo('log',
ref=np.log,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.chalf),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
# log(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
UnaryUfuncInfo('log10',
ref=np.log10,
domain=(0, None),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
# log10(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
UnaryUfuncInfo('log1p',
ref=np.log1p,
aliases=('special.log1p',),
domain=(-1, None),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True),
UnaryUfuncInfo('log2',
ref=np.log2,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
),
# log2(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
BinaryUfuncInfo('ldexp',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_inplace_autograd=False,
promotes_int_to_float=True,
supports_out=True,
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: mul(): functions with out=... arguments don't support
# automatic differentiation, but one of the arguments requires grad
# https://github.com/pytorch/pytorch/issues/68966
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
),
decorators=[
DecorateInfo(
toleranceOverride({
torch.complex64: tol(atol=1e-05, rtol=1e-05)
}),
'TestCommon', device_type='cpu',
),
], ),
OpInfo('logaddexp',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
args=(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),)),)),
OpInfo('logaddexp2',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
args=(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),)),)),
UnaryUfuncInfo('logical_not',
ref=np.logical_not,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_autograd=False,
skips=(
# The function variant always returns BoolTensor
# while the inplace variant preserves the input dtype.
# >>> t = torch.randn(3)
# >>> torch.logical_not(t)
# tensor([False, False, False])
# >>> torch.logical_not(t).dtype
# torch.bool
# >>> t.logical_not_().dtype
# torch.float32
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
)),
BinaryUfuncInfo('lt',
ref=np.less,
aliases=('less',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
OpInfo('linalg.lu_factor',
aten_name='linalg_lu_factor',
op=torch.linalg.lu_factor,
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]),
OpInfo('linalg.lu_factor_ex',
aten_name='linalg_lu_factor_ex',
op=torch.linalg.lu_factor_ex,
dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]),
OpInfo('linalg.lu',
aten_name='linalg_lu',
op=torch.linalg.lu,
dtypes=floating_and_complex_types(),
# https://github.com/pytorch/pytorch/issues/80411
# Runs very slowly on slow-gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]),
OpInfo('lu_unpack',
op=torch.lu_unpack,
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(skipCPUIfNoLapack,),
sample_inputs_func=sample_inputs_lu_unpack),
OpInfo('lu',
op=torch.lu,
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_lu,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
               # We skip the JIT tests because `lu` is a torch function implemented in Python,
               # so the JIT cannot resolve it as a Tensor method:
# RuntimeError:
# 'Tensor (inferred)' object has no attribute or method 'lu'.:
# File "<string>", line 3
# def the_method(i0):
# return i0.lu(True, True)
# ~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError not raised: Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('lu_solve',
op=torch.lu_solve,
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_lu_solve,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Tests different backward paths"),
"TestCommon", "test_floating_inputs_are_differentiable"),),
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver]),
OpInfo('linalg.lu_solve',
op=torch.linalg.lu_solve,
aten_name='linalg_lu_solve',
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_lu_solve,
skips=(
DecorateInfo(unittest.skip("Tests different backward paths"),
"TestCommon", "test_floating_inputs_are_differentiable"),),
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver]),
OpInfo('masked_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_masked_fill,
error_inputs_func=error_inputs_masked_fill,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
supports_out=False),
OpInfo('masked_scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_scatter,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
skips=(
)),
OpInfo('masked_select',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_masked_select,
error_inputs_func=error_inputs_masked_select),
OpInfo('matrix_exp',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
aliases=('linalg.matrix_exp',),
sample_inputs_func=sample_inputs_matrix_exp,
           # Needs to construct a 2n x 2n matrix by calling copy_ into it
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# times out
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
),
supports_out=False,
),
OpInfo('matmul',
aliases=('linalg.matmul',),
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16]
if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_matmul,
decorators=[
# NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
# ROCm intermittently fails the test with standard atol/rtol
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}),
'TestCommon', 'test_noncontiguous_samples', device_type='cuda',
active_if=TEST_WITH_ROCM),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}),
'TestCommon', 'test_out', device_type='cuda',
active_if=TEST_WITH_ROCM),
# mv for the sample with shapes (S, S, M, M), (M,) has some variance in the
# backward on CPU
DecorateInfo(toleranceOverride({torch.float32: tol(atol=0, rtol=1e-5)}),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu'), ],
skips=(
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu', dtypes=(torch.long,)),
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo',
device_type='xla', dtypes=(torch.long,)),
# https://github.com/pytorch/pytorch/issues/71774
DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
device_type='cpu', dtypes=(torch.long,)),
)),
OpInfo('max',
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_fwgrad_bwgrad=True,
skips=(
),
supports_forward_ad=True),
OpInfo('max',
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,
skips=(
)),
OpInfo('median',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of median do support out
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('nanmedian',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of nanmedian do support out
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('var_mean',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
# TODO: some signatures of var_mean do support out
supports_out=False,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True),
OpInfo('std_mean',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
# TODO: some signatures of std_mean do support out
supports_out=False,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True),
OpInfo('meshgrid',
variant_test_name='variadic_tensors',
ref=np.meshgrid,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'),
skips=[
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Skip operator schema test because this is a functional and not an operator
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
],
supports_out=False,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,),
OpInfo('meshgrid',
variant_test_name='list_of_tensors',
           # Unlike the variant above, we do not use np.meshgrid as a
           # ref since it does not officially support a list of numpy
           # arrays as its input.
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'),
skips=[
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
],
assert_autodiffed=True,
supports_out=False,
autodiff_nonfusible_nodes=[],
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,),
OpInfo('min',
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
skips=(
)),
OpInfo('min',
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,
skips=(
)),
OpInfo('quantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
),
# See https://github.com/pytorch/pytorch/issues/66357
# Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which
# does not have a batching rule in core
check_batched_forward_grad=False),
OpInfo('nanquantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
),
# See https://github.com/pytorch/pytorch/issues/66357
# Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which
# does not have a batching rule in core
check_batched_forward_grad=False),
BinaryUfuncInfo(
'max',
aliases=('maximum',),
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
ref=np.maximum,
supports_rhs_python_scalar=False,
skips=(
# Incorrectly attempts to use a scalar for the second argument
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
# TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'),
)),
BinaryUfuncInfo(
'maximum',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
ref=np.maximum,
supports_rhs_python_scalar=False,
skips=(
# TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'),
)),
BinaryUfuncInfo(
'min',
aliases=('minimum',),
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
ref=np.minimum,
supports_rhs_python_scalar=False,
skips=(
# Incorrectly attempts to use a scalar for the second argument
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
# TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo(
'minimum',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
ref=np.minimum,
supports_rhs_python_scalar=False,
skips=(
# TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo('logical_and',
ref=np.logical_and,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_autograd=False,
always_returns_bool=True,
supports_rhs_python_scalar=False),
BinaryUfuncInfo('logical_or',
ref=np.logical_or,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_autograd=False,
always_returns_bool=True,
supports_rhs_python_scalar=False),
BinaryUfuncInfo('logical_xor',
ref=np.logical_xor,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_autograd=False,
always_returns_bool=True,
supports_rhs_python_scalar=False,
skips=(
)),
BinaryUfuncInfo('bitwise_and',
ref=np.bitwise_and,
dtypes=integral_types_and(torch.bool),
operator_variant=operator.and_,
inplace_operator_variant=operator.iand,
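                    # operator.and_ / operator.iand are the functional forms of Python's
                    # `&` and `&=`, so the dunder-operator variants get exercised as well.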
supports_autograd=False,
supports_one_python_scalar=True,
skips=(
# RuntimeError: "bitwise_and_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs',
'test_type_promotion', device_type='cuda'),
)),
BinaryUfuncInfo('bitwise_or',
ref=np.bitwise_or,
dtypes=integral_types_and(torch.bool),
operator_variant=operator.or_,
inplace_operator_variant=operator.ior,
supports_autograd=False,
supports_one_python_scalar=True,
skips=(
# TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo('bitwise_xor',
ref=np.bitwise_xor,
dtypes=integral_types_and(torch.bool),
operator_variant=operator.xor,
inplace_operator_variant=operator.ixor,
supports_autograd=False,
supports_one_python_scalar=True,
skips=(
# TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo('heaviside',
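                    # heaviside(input, values) is the step function: 0 where input < 0,
                    # `values` where input == 0, and 1 where input > 0.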
ref=lambda a, b: (
# necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64
np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b)
),
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
supports_autograd=False,
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: heaviside is not yet implemented for tensors with different dtypes.
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion'),
# PyTorch's heaviside does not appear to propagate NaNs
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
)),
BinaryUfuncInfo('lcm',
ref=np.lcm,
dtypes=integral_types_and(),
supports_autograd=False,
supports_rhs_python_scalar=False),
BinaryUfuncInfo('gcd',
ref=np.gcd,
dtypes=integral_types_and(),
supports_autograd=False,
supports_rhs_python_scalar=False,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.int8,)),)),
BinaryUfuncInfo('isclose',
ref=np.isclose,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_isclose,
error_inputs_func=error_inputs_isclose,
supports_autograd=False,
supports_out=False,
supports_rhs_python_scalar=False,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestCommon',
'test_numpy_refs', dtypes=(torch.complex128,)),
# RuntimeError: Short did not match Int
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion'),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
)),
    # `softmax` supports different dtypes depending on whether the `dtype` argument
    # is passed or not. Hence there are two OpInfo entries, one with `dtype` and one without.
# https://github.com/pytorch/pytorch/issues/68752
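    # For example: torch.softmax(t, dim=-1) keeps t's floating dtype, while
    # torch.softmax(t, dim=-1, dtype=torch.float64) casts the input to float64 before the
    # computation (which is what lets the "with_dtype" entry below cover non-floating inputs).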
OpInfo('softmax',
aliases=('special.softmax', 'nn.functional.softmax',),
aten_name='softmax',
aten_backward_name='_softmax_backward_data',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
supports_forward_ad=True,
supports_out=True),
OpInfo('softmax',
aliases=('special.softmax', 'nn.functional.softmax',),
variant_test_name="with_dtype",
aten_name='softmax',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True,
supports_forward_ad=True,
supports_out=True),
    # `softmin` supports different dtypes depending on whether the `dtype` argument
    # is passed or not. Hence there are two OpInfo entries, one with `dtype` and one without.
# https://github.com/pytorch/pytorch/issues/68752
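    # For reference: softmin(x, dim) is equivalent to softmax(-x, dim).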
OpInfo('nn.functional.softmin',
aten_name='softmin',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_jit_shape_analysis=False,
assert_autodiffed=False,
supports_forward_ad=True,
supports_out=False),
OpInfo('nn.functional.softmin',
variant_test_name="with_dtype",
aten_name='softmin',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=False,
supports_forward_ad=True,
supports_out=False),
OpInfo(
"nn.functional.cross_entropy",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_cross_entropy,
supports_out=False,
supports_forward_ad=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}),
"TestJit",
"test_variant_consistency_jit",
device_type="cpu",
),
),
skips=(
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 1536
# test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked
# 1536 bytes CUDA memory on device 0
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
)
),
OpInfo('nn.functional.normalize',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_normalize,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
OpInfo('aminmax',
ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)),
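           # The NumPy ref returns an (amin, amax) pair to mirror torch.aminmax,
           # which returns a (min, max) named tuple.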
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(onlyNativeDeviceTypes,),
supports_autograd=False,
sample_inputs_func=sample_inputs_aminmax,
error_inputs_func=error_inputs_aminmax_amax_amin,
skips=(
# AssertionError: Resizing an out= argument with no elements threw a resize warning!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),
)),
OpInfo('as_strided',
op=lambda x, size, stride, storage_offset=0:
torch.as_strided(x, size, stride, storage_offset=storage_offset),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
sample_inputs_func=sample_inputs_as_strided,
skips=(
# Note: This xfail is fine -- it's inherent to how as_strided works
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# AssertionError: False is not true : Scalars failed to compare as equal!
DecorateInfo(unittest.skip("Errors when storage_offset is included"),
'TestCommon', 'test_variant_consistency_eager'),
# Not close
DecorateInfo(unittest.skip("Errors when storage_offset is included"),
'TestCommon', 'test_complex_half_reference_testing'),
# Not close
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Numerous errors"), 'TestGradients'))),
OpInfo('as_strided_scatter',
op=lambda x, src, size, stride, storage_offset=0:
torch.as_strided_scatter(x, src, size, stride, storage_offset=storage_offset),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
sample_inputs_func=sample_inputs_as_strided_scatter,
skips=(
DecorateInfo(unittest.skip('Works only for CPU complex64'), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip('Works for float64, fails for everything else'), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip('Works for int64, fails for everything else'), 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950
DecorateInfo(unittest.skip('Fails in most cases, passes on LAZY for some reason'), 'TestCommon', 'test_variant_consistency_eager'), # noqa: B950
DecorateInfo(unittest.skip('Only fails for LAZY, passes on everything else'), 'TestCompositeCompliance', 'test_backward'), # noqa: B950
DecorateInfo(unittest.skip('Passes on complex64 and float32 only'), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Fails on cuda + rocm'), 'TestCommon', 'test_complex_half_reference_testing'),
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip('Passes on complex128 and float64 only'), 'TestGradients', 'test_fn_fwgrad_bwgrad'),)),
OpInfo('native_layer_norm',
aten_name='native_layer_norm',
ref=reference_native_layer_norm,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_native_layer_norm,
error_inputs_func=error_inputs_native_layer_norm,
skips=(
# IndexError: tuple index out of range
DecorateInfo(unittest.skip('Skipped!'), 'TestGradients', 'test_forward_mode_AD'),
# Tests fail when weight=None and bias is defined
# https://github.com/pytorch/pytorch/issues/79705
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
# JIT test also tries to compute double backward, which fails
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Extremal value issue on aten::native_layer_norm, which returns 'nan' for mean on 'inf' inputs
# possibly because of the welford implementation.
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
)),
OpInfo('nn.functional.cosine_similarity',
aten_name="cosine_similarity",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_cosine_similarity),
OpInfo('nn.functional.adaptive_avg_pool1d',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool1d),
OpInfo('nn.functional.adaptive_avg_pool2d',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, int]'. :
# File "<string>", line 3
# def the_method(i0):
# return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool2d),
OpInfo('nn.functional.adaptive_avg_pool3d',
dtypes=floating_types_and(torch.half),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, NoneType, NoneType]'. :
# File "<string>", line 3
#
# def the_method(i0):
# return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
#
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool3d),
OpInfo('nn.functional.adaptive_max_pool1d',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool1d),
OpInfo('nn.functional.adaptive_max_pool2d',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, int]'. :
# File "<string>", line 3
# def the_method(i0):
# return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool2d),
OpInfo('nn.functional.adaptive_max_pool3d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, NoneType, NoneType]'. :
# File "<string>", line 3
#
# def the_method(i0):
# return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
#
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool3d),
OpInfo('nn.functional.avg_pool1d',
aten_name='avg_pool1d',
supports_autograd=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.int64, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_avgpool1d),
OpInfo('nn.functional.avg_pool3d',
aten_name='avg_pool3d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_avgpool3d,
skips=(
# AssertionError: Tensor-likes are not close!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),
)),
OpInfo(
"nn.functional.binary_cross_entropy_with_logits",
aten_name="binary_cross_entropy_with_logits",
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_binary_cross_entropy_with_logits,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
'TestJit',
'test_variant_consistency_jit',
dtypes=(torch.float32,)
),
),
),
UnaryUfuncInfo(
'nn.functional.relu',
aten_name="relu",
ref=lambda a: np.where(a <= 0, 0, a),
supports_autograd=True,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_nn_activation_relu,
supports_out=False,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True),
OpInfo('nn.functional.conv_transpose1d',
aten_name='conv_transpose1d',
aliases=('conv_transpose1d',),
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
sample_inputs_func=sample_inputs_conv_transpose1d,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv_transpose2d',
aten_name='conv_transpose2d',
aliases=('conv_transpose2d',),
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
sample_inputs_func=sample_inputs_conv_transpose2d,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv_transpose3d',
aten_name='conv_transpose3d',
aliases=('conv_transpose3d',),
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
sample_inputs_func=sample_inputs_conv_transpose3d,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
# Runs very slowly on slow-gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=2e-04, rtol=2e-04), }),
'TestCompositeCompliance', 'test_operator', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-06), }),
'TestCommon', 'test_noncontiguous_samples', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=2e-05), }),
'TestCompositeCompliance', 'test_forward_ad', device_type='cuda',
active_if=TEST_CUDNN)],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped! 75029"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
DecorateInfo(unittest.skip("Skipped! 75363"), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
DecorateInfo(unittest.skip("Skipped! RuntimeError: bias tensor has to be contiguous"), 'TestGradients',
'test_forward_mode_AD', device_type='cuda', active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', device_type='cuda',
active_if=(not TEST_CUDNN)),
),
supports_out=False,),
OpInfo('nn.functional.conv1d',
aliases=('conv1d',),
aten_name='conv1d',
dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
sample_inputs_func=sample_inputs_conv1d,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=1e-2)}),
'TestCommon', 'test_complex_half_reference_testing'
),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-3, rtol=1e-3)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness',
),
),
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Ref: https://github.com/pytorch/pytorch/issues/75309
# AssertionError: None mismatch: torch.complex128 is not None
DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules',
'test_custom_rules', dtypes=(torch.complex64, torch.complex128)),
# Ref: https://github.com/pytorch/pytorch/issues/75309
# RuntimeError: UNSUPPORTED DTYPE: complex
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo',
'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)),
),
supports_expanded_weight=True,
supports_out=False,),
OpInfo('nn.functional.conv2d',
aliases=('conv2d',),
aten_name='conv2d',
dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
           sample_inputs_func=sample_inputs_conv2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}),
'TestCommon', 'test_complex_half_reference_testing',
),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=1e-2)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness',
),
),
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Works on some configs!"), 'TestJit', 'test_variant_consistency_jit'),
# Ref: https://github.com/pytorch/pytorch/issues/75309
# AssertionError: None mismatch: torch.complex128 is not None
DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules',
'test_custom_rules', dtypes=(torch.complex64, torch.complex128)),
# RuntimeError: UNSUPPORTED DTYPE: complex
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo',
'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)),
),
supports_expanded_weight=True,
supports_out=False,),
OpInfo('nn.functional.group_norm',
aten_name='group_norm',
aliases=('group_norm',),
ref=reference_group_norm,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))
],
sample_inputs_func=sample_inputs_group_norm,
supports_expanded_weight=True,),
OpInfo('nn.functional.instance_norm',
# no ref because instance_norm will often have numerical instability (large numbers or nan)
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
decorators=[
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad',
active_if=TEST_WITH_ROCM)
],
sample_inputs_func=sample_inputs_instance_norm,
supports_expanded_weight=True,),
OpInfo('nn.functional.layer_norm',
aten_name='layer_norm',
aten_backward_name='layer_norm_backward',
aliases=('layer_norm',),
ref=reference_layer_norm,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
assert_jit_shape_analysis=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}),
'TestCommon', 'test_numpy_refs'
)
],
sample_inputs_func=sample_inputs_layer_norm,
supports_expanded_weight=True,),
OpInfo('nn.functional.local_response_norm',
dtypes=floating_types_and(torch.int64, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
],
sample_inputs_func=sample_inputs_local_response_norm,),
OpInfo('constant_pad_nd',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_constant_pad_nd,
supports_out=False,
skips=(
# bool can't be passed to Scalar arguments in JIT tracer because
# BoolType is not a subtype of ScalarType.
DecorateInfo(
unittest.expectedFailure, 'TestNNCOpInfo',
'test_nnc_correctness', dtypes=(torch.bool,)),
DecorateInfo(
unittest.expectedFailure, 'TestCudaFuserOpInfo',
'test_nvfuser_correctness', dtypes=(torch.bool,)),
)),
OpInfo('nn.functional.pad',
variant_test_name='constant',
aten_name='constant_pad_nd',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'),
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='reflect',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'),
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='replicate',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'),
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='circular',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_grad=False,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
supports_out=False),
OpInfo('nn.functional.hardswish',
aten_name="hardswish",
aten_backward_name='hardswish_backward',
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardswish,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::hardswish"]),
OpInfo('nn.functional.unfold',
aten_name='im2col',
aten_backward_name='im2col_backward',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=sample_inputs_nn_unfold,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
skips=(
# NOTE: this failure may not reproduce consistently on different systems
# false INTERNAL ASSERT FAILED at "...torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185
DecorateInfo(unittest.skip("Internal assert failed!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='nearest',
supports_autograd=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
dtypes=floating_types_and(torch.uint8, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),
sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='linear',
supports_autograd=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'linear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bilinear',
supports_fwgrad_bwgrad=True,
supports_autograd=True,
supports_forward_ad=True,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bicubic',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='trilinear',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='area',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_interpolate, 'area'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.upsample_bilinear',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo(
"nn.functional.soft_margin_loss",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
# doesn't support grad on target
sample_inputs_func=partial(sample_inputs_loss, rhs_requires_grad=False),
),
OpInfo('nn.functional.upsample_nearest',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.uint8, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample, 'nearest'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo(
"nn.functional.margin_ranking_loss",
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
supports_out=False,
sample_inputs_func=sample_inputs_margin_ranking_loss,
error_inputs_func=error_inputs_margin_ranking_loss,
reference_inputs_func=reference_inputs_margin_ranking_loss,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
OpInfo(
"nn.functional.multi_margin_loss",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
supports_gradgrad=False,
sample_inputs_func=sample_inputs_multi_margin_loss,
),
OpInfo(
"nn.functional.multilabel_margin_loss",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
supports_gradgrad=False,
sample_inputs_func=sample_inputs_multilabel_margin_loss
),
OpInfo('nn.functional.leaky_relu',
aliases=None,
aten_name="leaky_relu",
aten_backward_name='leaky_relu_backward',
sample_inputs_func=sample_inputs_leaky_relu,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=True,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::leaky_relu"]),
OpInfo(
"nn.functional.multilabel_soft_margin_loss",
supports_out=False,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_multilabel_soft_margin_loss,
supports_forward_ad=True,
decorators=(
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}),
"TestJit",
"test_variant_consistency_jit",
),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
),
skips=(
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 4096
# __main__.TestJitCUDA.test_variant_consistency_jit_nn_functional_multilabel_soft_margin_loss_cuda_float32
# leaked 4096 bytes CUDA memory on device 0
DecorateInfo(
# Skip instead of expectedFailure because this fails
# locally for me but passes in CI.
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
),
),
OpInfo('nn.functional.avg_pool2d',
aten_name='avg_pool2d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.int64, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_avgpool2d,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'),
)),
OpInfo('nn.functional.fractional_max_pool2d',
supports_autograd=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs),
# vmap does not support random operations
check_batched_forward_grad=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
test_neg_view=False,
sample_inputs_func=sample_inputs_fractional_max_pool2d,
decorators=(
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'))),
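    # The `op=lambda ...: wrapper_set_seed(...)` pattern in the entry above (and in the random
    # ops that follow) pins the RNG so repeated invocations of a random op compare equal.
    # A hedged sketch of what such a wrapper is assumed to do (the real helper is defined
    # elsewhere in this module, not here; the exact seed value is an assumption):
    #     def wrapper_set_seed(op, *args, **kwargs):
    #         torch.manual_seed(42)
    #         return op(*args, **kwargs)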
OpInfo('nn.functional.fractional_max_pool3d',
supports_autograd=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs),
# vmap does not support random operations
check_batched_forward_grad=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
test_neg_view=False,
sample_inputs_func=sample_inputs_fractional_max_pool3d,
decorators=(
# FIXME: both derivatives are implemented incorrectly
# https://github.com/pytorch/pytorch/issues/69322
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),)),
OpInfo('nn.functional.max_pool1d',
aten_name='max_pool1d',
supports_autograd=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cpu'),
DecorateInfo(unittest.skip("Works on some configs"), 'TestNNCOpInfo',
'test_nnc_correctness', dtypes=(torch.bfloat16,)),
# RuntimeError: The tensor has a non-zero number of elements, but its data is not allocated yet.
# Caffe2 uses a lazy allocation, so you will need to call mutable_data() or raw_mutable_data()
# to actually allocate memory
DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'),
),
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.max_pool2d',
aten_name='max_pool2d',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
# Vmap is not happy with non-contiguous (channels_last) inputs
check_batched_gradgrad=False,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
assert_jit_shape_analysis=True,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.max_pool3d',
aten_name='max_pool3d',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# got: Batching rule not implemented for aten::flatten.using_ints
check_batched_forward_grad=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
# TODO: investigate nondeterminism
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.max_unpool1d',
aten_name='max_unpool1d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_max_unpool,
skips=(
# Gradients are tested in `variant_test_name=grad` below.
# We skip tests here because there is non-determinism in backward
# with gather, when there are writes into the same memory location,
# and if there are several indices pointing to the same memory,
# gradcheck is oblivious about that and cannot perturb them all at once
# (see sample_inputs_max_unpool_grad to find out more).
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad',
device_type='cpu'),
)),
OpInfo('nn.functional.max_unpool1d',
variant_test_name='grad',
aten_name='max_unpool1d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_max_unpool_grad),
OpInfo('nn.functional.max_unpool2d',
aten_name='max_unpool2d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_max_unpool,
skips=(
# Gradients are tested in `variant_test_name=grad` below.
# We skip tests here because there is non-determinism in backward
# with gather, when there are writes into the same memory location,
# and if there are several indices pointing to the same memory,
# gradcheck is oblivious about that and cannot perturb them all at once
# (see sample_inputs_max_unpool_grad to find out more).
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
)),
OpInfo('nn.functional.max_unpool2d',
variant_test_name='grad',
aten_name='max_unpool2d',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# Vmap is not happy with non-contiguous (channels_last) inputs
check_batched_grad=False,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_max_unpool_grad),
OpInfo('nn.functional.max_unpool3d',
aten_name='max_unpool3d',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_max_unpool,
skips=(
# Gradients are tested in `variant_test_name=grad` below.
# We skip tests here because there is non-determinism in backward
# with gather, when there are writes into the same memory location,
# and if there are several indices pointing to the same memory,
# gradcheck is oblivious about that and cannot perturb them all at once
# (see sample_inputs_max_unpool_grad to find out more).
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
)),
OpInfo('nn.functional.max_unpool3d',
variant_test_name='grad',
aten_name='max_unpool3d',
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_max_unpool_grad),
OpInfo('nn.functional.linear',
aten_name='linear',
supports_autograd=True,
sample_inputs_func=sample_inputs_linear,
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16]
if (CUDA11OrLater or TEST_WITH_ROCM) else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16]
if (CUDA11OrLater or TEST_WITH_ROCM) else []),
# linear calls mm under the hood which is nondeterministic on CUDA
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_expanded_weight=True,
decorators=(
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
)),
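    # A hedged usage note for the nondeterminism comment in the entry above: determinism can be
    # requested globally, in which case ops without a deterministic implementation raise a
    # RuntimeError instead of silently varying:
    #     torch.use_deterministic_algorithms(True)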
OpInfo('nn.functional.bilinear',
aten_name='bilinear',
supports_autograd=True,
sample_inputs_func=sample_inputs_bilinear,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []),
skips=(
# NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)),
),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('nn.functional.glu',
aten_name='glu',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
sample_inputs_func=sample_inputs_glu,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
UnaryUfuncInfo(
'nn.functional.elu',
aten_backward_name='elu_backward',
ref=lambda x, alpha=1.0, inplace=False:
np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'alpha': 0.8}, {'alpha': 0.8}),
inplace_variant=lambda x, alpha=1.0:
torch.nn.functional.elu(x, alpha, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
UnaryUfuncInfo(
'nn.functional.prelu',
aten_backward_name='prelu_backward',
ref=lambda x, weight:
np.maximum(0., x) + np.minimum(0., x) *
(weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
# test_reference_numerics only tests the case when the weight tensor is a scalar
sample_kwargs=sample_kwargs_prelu_scalar_weight,
error_inputs_func=error_inputs_prelu,
sample_inputs_func=sample_inputs_prelu,
reference_inputs_func=reference_inputs_prelu,
decorators=[
# FIXME: second derivative is implemented but seems to be incorrect
# https://github.com/pytorch/pytorch/issues/68760
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
# https://github.com/pytorch/pytorch/issues/68752
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ],
),
UnaryUfuncInfo(
'nn.functional.celu',
ref=lambda x, alpha=1.0, inplace=False:
np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'alpha': 0.8}, {'alpha': 0.8}),
inplace_variant=lambda x, alpha=1.0:
torch.nn.functional.celu(x, alpha, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
UnaryUfuncInfo(
'nn.functional.rrelu',
aten_backward_name='rrelu_with_noise_backward',
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.rrelu, input, *args, inplace=True, **kwargs),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
(dict(lower=0., upper=1., training=True), dict(lower=0., upper=1., training=True)),
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs=dict(lower=0., upper=1., training=True)),
decorators=(
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
),),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# In-place operations do not play well with forward AD
# https://github.com/pytorch/pytorch/issues/77447
DecorateInfo(unittest.expectedFailure, 'TestGradients',
'test_inplace_forward_mode_AD'),
# The noise vector that's generated in these tests is not the same elementwise
DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),
DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),
DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_non_contig_expand'),
DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),)),
UnaryUfuncInfo(
'nn.functional.selu',
ref=lambda x, inplace=False:
1.0507009873554804934193349852946 * (
np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1))
),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True, # depends on 'elu'
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-2, rtol=1.8e-2),
torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
UnaryUfuncInfo(
'nn.functional.silu',
aten_backward_name='silu_backward',
ref=lambda x, inplace=False: x / (1 + np.exp(-x)),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_autograd=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-3, rtol=1e-3),
torch.bfloat16: tol(atol=1e-4, rtol=1e-4)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=(torch.cfloat,), device_type='cpu'),
),
autodiff_nonfusible_nodes=["aten::silu"],
),
# TODO: combine this with the nn.functional.silu OpInfo when
# complex autodiff for silu is supported or when
# the forward bug is fixed
    # Note: silu errors when given inputs that require grad
    # but whose dtype doesn't support grad.
    # This is why the dtypes list above passes test_dtypes:
    # it gets lucky, because silu fails in forward
    # when test_dtypes sets requires_grad to True.
    # THIS IS A BUG
UnaryUfuncInfo(
'nn.functional.silu',
variant_test_name='complex',
ref=lambda x, inplace=False:
x / (1 + np.exp(-x)),
dtypes=complex_types(),
dtypesIfCUDA=empty_types(),
supports_forward_ad=False,
supports_autograd=False,
assert_autodiffed=False,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-3, rtol=1e-3),
torch.bfloat16: tol(atol=1e-4, rtol=1e-4)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=(torch.cfloat,), device_type='cpu'),
# FIXME: intentionally misreports dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
# FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j)
DecorateInfo(unittest.skip("Skipped!"),
'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.complex64, torch.cdouble)),
DecorateInfo(unittest.skip("Skipped!"),
'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=(torch.complex64,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=(torch.complex64,)))),
UnaryUfuncInfo(
'nn.functional.hardsigmoid',
aten_backward_name='hardsigmoid_backward',
ref=reference_hardsigmoid,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=False,
supports_forward_ad=True,
supports_out=False,
inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ],
skips=[
# still want to test that first derivative works though second derivative isn't supported
DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_gradgrad"),
# produces 0 instead of nan on ROCM
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_extremal",
device_type='cuda',
active_if=(TEST_WITH_ROCM)), ]
),
UnaryUfuncInfo(
'nn.functional.logsigmoid',
aten_name="log_sigmoid",
aten_backward_name='log_sigmoid_backward',
ref=reference_logsigmoid,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=False,
supports_forward_ad=True,
supports_gradgrad=True,
# autodiff_nonfusible_nodes=["aten::log_sigmoid"],
decorators=[
DecorateInfo(
precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}),
'TestUnaryUfuncs', 'test_reference_numerics_small'),
DecorateInfo(
precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}),
'TestUnaryUfuncs', 'test_reference_numerics_large'),
DecorateInfo(
precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
],
skips=(
# Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cpu'),
),
),
UnaryUfuncInfo(
'nn.functional.mish',
aten_backward_name='mish_backward',
ref=lambda x: x * np.tanh(reference_softplus(x)),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
inplace_variant=partial(torch.nn.functional.mish, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda',), ],
),
UnaryUfuncInfo(
'nn.functional.softsign',
ref=lambda x: x / (np.abs(x) + 1),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=(torch.int, torch.int8)),
# pytorch computes (0+nanj), numpy computes (-5e-18-1j) for input (-501.-1.0000e+20j)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs',
"test_reference_numerics_large", dtypes=(torch.complex64,)),),
),
UnaryUfuncInfo(
'nn.functional.tanhshrink',
ref=lambda x: x - np.tanh(x),
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
decorators=[
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(
toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
],
skips=(
# in each case, pytorch will produce a nan while numpy will not
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_small",
dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)),
DecorateInfo(unittest.skip("Fails on some jobs works on others!"),
'TestUnaryUfuncs', "test_reference_numerics_large",
dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)),
DecorateInfo(unittest.skip("Fails on some jobs works on others!"),
'TestUnaryUfuncs', "test_reference_numerics_extremal",
dtypes=(torch.complex64, torch.complex128), device_type='cpu',
active_if=(IS_MACOS or IS_WINDOWS)),
),
),
UnaryUfuncInfo(
'nn.functional.threshold',
ref=lambda x, threshold, value: np.where(x <= threshold, value, x).astype(x.dtype),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input: ({'threshold': 0.123,
'value': -9},
{'threshold': 0.123,
'value': -9}),
# TODO(whc) should not need sample_inputs_func, but without it
# kwargs aren't being hooked up properly
sample_inputs_func=sample_inputs_threshold,
),
OpInfo(
"nn.functional.triplet_margin_loss",
sample_inputs_func=sample_inputs_triplet_margin_loss,
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo(
"nn.functional.triplet_margin_with_distance_loss",
sample_inputs_func=partial(sample_inputs_triplet_margin_loss, with_distance=True),
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
            # This test cannot handle a callable passed to `distance_function`. If we used
            # `distance_function=None`, the test would pass.
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
),
DecorateInfo(
unittest.expectedFailure,
"TestNormalizeOperators",
"test_normalize_operator_exhaustive",
),
),
),
BinaryUfuncInfo('nextafter',
dtypes=floating_types_and(torch.bfloat16),
supports_autograd=False,
supports_rhs_python_scalar=False),
OpInfo('topk',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_topk),
    # Multiple variants for batch_norm, to test it with cuDNN both enabled and disabled
    # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details
OpInfo('nn.functional.batch_norm',
aten_name='batch_norm',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_batch_norm,
skips=(
# see https://github.com/pytorch/pytorch/issues/71286
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
device_type='cpu', dtypes=(torch.bfloat16,)),
# see https://github.com/pytorch/pytorch/issues/76283
DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad',
device_type="cpu"),
# Trying to use forward AD with miopen_batch_norm that does not support it
# because it has not been implemented yet.
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad',
device_type="cuda", active_if=TEST_WITH_ROCM),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
)),
# This variant tests batch_norm with cuDNN disabled only on CUDA devices
OpInfo('nn.functional.batch_norm',
variant_test_name='without_cudnn',
aten_name='batch_norm',
dtypes=empty_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
decorators=[onlyCUDA, disablecuDNN],
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad',
device_type='cpu'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
),
sample_inputs_func=sample_inputs_batch_norm),
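    # A hedged sketch of what the `disablecuDNN` decorator used above is expected to do (its
    # definition lives in the device-type test utilities, not in this file): run the decorated
    # test with cuDNN switched off, roughly
    #     with torch.backends.cudnn.flags(enabled=False):
    #         test_fn(*args, **kwargs)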
OpInfo(
"nn.functional.binary_cross_entropy",
aten_backward_name='binary_cross_entropy_backward',
sample_inputs_func=sample_inputs_binary_cross_entropy,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
gradcheck_fast_mode=False,
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(
# RuntimeError: expected int at position 0, but got: Tensor
DecorateInfo(
unittest.skip("Skipped!"),
"TestCudaFuserOpInfo",
),
# RuntimeError: expected int at position 0, but got: Tensor
DecorateInfo(
unittest.skip("Skipped!"),
"TestNNCOpInfo",
"test_nnc_correctness",
),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-3, rtol=1e-3)}),
"TestJit",
"test_variant_consistency_jit",
),
),
skips=(
# RuntimeError: expected int at position 0, but got: Tensor
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
),
),
),
    # We have to add 2 OpInfo entries for `igamma` and `igammac`. The first is the
    # standard entry; the second is to run gradcheck tests on the second argument.
BinaryUfuncInfo('igamma',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
aliases=('torch.special.gammainc',),
dtypesIfCUDA=floating_types(),
# TODO: FIXME
supports_rhs_python_scalar=False,
supports_autograd=False,
skips=(
# FIXME: incorrectly tries to pass a rhs scalar
DecorateInfo(unittest.expectedFailure, 'TestJit',
'test_jit_alias_remapping'),
)),
    # TODO: FIXME, ideally by implementing grad for both inputs
# BinaryUfuncInfo('igamma',
# variant_test_name='grad_other',
# # Since autograd formula is implemented only for other and
# # gradcheck test verifies the formula for input in SampleInput,
# # we permute the arguments.
# op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs),
# inplace_variant=None,
# method_variant=None,
# supports_rhs_python_scalar=False,
# rhs_make_tensor_kwargs=dict(requires_grad=False),
# dtypes=floating_types_and(torch.bfloat16, torch.float16),
# backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
# dtypesIfCUDA=floating_types(),
# backward_dtypesIfCUDA=floating_types(),
# supports_inplace_autograd=False,
# skips=(
# # Derivative wrt first tensor not implemented
# DecorateInfo(unittest.expectedFailure, "TestCommon",
    #                      "test_floating_inputs_are_differentiable"),
# # test does not work with passing lambda for op
# # AssertionError: False is not true : Tensors failed to compare as equal!
# DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
    #                      # test fails for the function variant, as we permute the arguments,
    #                      # but not for the inplace or method variants.
# DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float
# DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),
# )),
BinaryUfuncInfo('igammac',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
aliases=('torch.special.gammaincc',),
dtypesIfCUDA=floating_types(),
supports_autograd=False,
supports_rhs_python_scalar=False,
skips=(
# FIXME: incorrectly tries to pass a rhs scalar
DecorateInfo(unittest.expectedFailure, 'TestJit',
'test_jit_alias_remapping'),
)),
# TODO: FIXME, ideally by implementing grad for both inputs
# BinaryUfuncInfo('igammac',
# variant_test_name='grad_other',
# # Since autograd formula is implemented only for other and
# # gradcheck test verifies the formula for input in SampleInput,
# # we permute the arguments
# op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs),
# inplace_variant=None,
# method_variant=None,
# supports_rhs_python_scalar=False,
# rhs_make_tensor_kwargs=dict(requires_grad=False),
# dtypes=floating_types_and(torch.bfloat16, torch.float16),
# backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
# dtypesIfCUDA=floating_types(),
# backward_dtypesIfCUDA=floating_types(),
# supports_inplace_autograd=False,
# decorators=[
# # Derivative wrt first tensor not implemented
# DecorateInfo(unittest.expectedFailure, "TestCommon",
# "test_floating_inputs_are_differentiable"),
# ],
# skips=(
# # test does not work with passing lambda for op
# # AssertionError: False is not true : Tensors failed to compare as equal!
# DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
    #                      # test fails for the function variant, as we permute the arguments,
    #                      # but not for the inplace or method variants.
# DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float
# DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),
# )),
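    # For clarity on the disabled `grad_other` variants above: the OpInfo gradcheck tests verify
    # gradients w.r.t. the SampleInput's input, so permuting the arguments is how the derivative
    # w.r.t. the *other* operand would be exercised. A hedged sketch of the idea
    # (`fixed_input` and `other` are hypothetical tensors, not names from this file):
    #     f = lambda other: torch.igamma(fixed_input, other)
    #     torch.autograd.gradcheck(f, (other.double().requires_grad_(),))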
UnaryUfuncInfo('nn.functional.softshrink',
aten_name="softshrink",
aten_backward_name='softshrink_backward',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_softshrink,
error_inputs_func=error_inputs_softshrink),
UnaryUfuncInfo('nn.functional.hardshrink',
aten_name="hardshrink",
aten_backward_name='hardshrink_backward',
dtypes=floating_types_and(torch.bfloat16,),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardshrink,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::hardshrink"]),
UnaryUfuncInfo('nn.functional.hardtanh',
aten_name="hardtanh",
aten_backward_name='hardtanh_backward',
dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16),
backward_dtypes=all_types(),
dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.float16,
torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardtanh,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::hardtanh"]),
OpInfo('nn.functional.gelu',
aten_name="gelu",
aten_backward_name='gelu_backward',
ref=reference_gelu if TEST_SCIPY else None,
error_inputs_func=error_inputs_gelu,
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_gelu,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::gelu"],
skips=(
# AssertionError: Tensor-likes are not close!
# May not replicate in CI
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),)),
UnaryUfuncInfo('nn.functional.relu6',
aten_name="relu6",
dtypes=all_types_and(torch.bfloat16),
backward_dtypes=floating_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
assert_autodiffed=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=["aten::relu6"]),
OpInfo('mm',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16]
if (CUDA11OrLater or TEST_WITH_ROCM) else []),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_mm,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
)),
OpInfo('mode',
op=torch.mode,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Resized a non-empty tensor but did not warn about it
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
),
sample_inputs_func=sample_inputs_mode,),
MvlGammaInfo(variant_test_name='mvlgamma_p_1',
domain=(1, None),
skips=skips_mvlgamma() + \
(DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.float16, torch.int8)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=(torch.int8,)),),
sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),
MvlGammaInfo(variant_test_name='mvlgamma_p_3',
domain=(2, None),
skips=skips_mvlgamma(skip_redundant=True) + (
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.float16, torch.int8)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=(torch.int8,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),
MvlGammaInfo(variant_test_name='mvlgamma_p_5',
domain=(3, None),
skips=skips_mvlgamma(skip_redundant=True) + (
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.float16, torch.int8)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=(torch.int8,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),
BinaryUfuncInfo('ne',
ref=np.not_equal,
aliases=('not_equal',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
always_returns_bool=True,
supports_autograd=False,
skips=(
)),
OpInfo('narrow',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_narrow),
UnaryUfuncInfo('neg',
aliases=('negative', ),
ref=np.negative,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
error_inputs_func=error_inputs_neg,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True),
OpInfo('dist',
op=torch.dist,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got:
# Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_dist),
OpInfo('outer',
op=torch.outer,
aliases=('ger', ),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_outer,),
OpInfo('ormqr',
op=torch.ormqr,
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_ormqr,
error_inputs_func=error_inputs_ormqr,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack],
skips=(
# ormqr does not support forward when complex inputs require grad
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
# Strides are not the same!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
OpInfo('permute',
ref=np.transpose,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_permute,
reference_inputs_func=reference_inputs_permute),
BinaryUfuncInfo('pow',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
ref=np.power,
                    # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled
# for Float16, causing this test to fail. pow's autograd for Float16 is thus currently
# unsupported on CPU.
backward_dtypes=floating_and_complex_types_and(torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
supports_one_python_scalar=True,
                    # Integer types do not support negative exponents
rhs_make_tensor_kwargs=dict(low=0),
# Raising negative real numbers to fractional powers is not supported
lhs_make_tensor_kwargs=dict(low=0),
decorators=(
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05),
torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}),
'TestBinaryUfuncs', 'test_scalar_support'),
),
skips=(
                        # Skipping integers because raising them to negative powers causes an error
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]),
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values',
dtypes=[torch.int16, torch.int32, torch.int64]),
# FIXME Complex values error with: Greatest absolute difference: nan at index
# Ref: https://github.com/pytorch/pytorch/issues/76853
# For `chalf`, reference computation in `numpy` is computed in `cfloat`.
# Output of `chalf` saturates to `inf` quicker than reference due to its small range
# which leads to failure of this test.
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_batch_vs_slicing',
dtypes=(torch.complex32,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig',
dtypes=(torch.complex32,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics',
dtypes=(torch.complex32,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values',
dtypes=(torch.complex32, torch.complex64, torch.complex128)),
)),
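    # Context for the `low=0` RHS samples in the `pow` entry above (a hedged example, not part
    # of the OpInfo machinery): integer tensors raised to negative integer powers raise in
    # eager mode, e.g.
    #     torch.tensor([2]) ** -1  # RuntimeError: Integers to negative integer powers are not allowed.
    # so the sample exponents are constrained to be non-negative.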
BinaryUfuncInfo('float_power',
ref=np.float_power,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
promotes_int_to_float=True,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_one_python_scalar=True,
                    # Integer types do not support negative exponents
rhs_make_tensor_kwargs=dict(low=0),
# Raising negative real numbers to fractional powers is not supported
lhs_make_tensor_kwargs=dict(low=0),
decorators=(
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05),
torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}),
'TestBinaryUfuncs', 'test_scalar_support'),
),
skips=(
# FIXME
# AssertionError: Object comparison failed: torch.float64 != torch.float32
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
# -3.43399e+38 is outside the range of representable values of type 'float'
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Complex values error with: Greatest absolute difference: nan at index
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
dtypes=[torch.complex64, torch.complex128]),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values',
dtypes=[torch.complex64, torch.complex128]),
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values',
dtypes=[torch.complex64, torch.complex128]),
)),
OpInfo('qr',
op=torch.qr,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_qr_geqrf,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# In-place ops
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),
UnaryUfuncInfo('rad2deg',
ref=np.degrees,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
UnaryUfuncInfo('real',
ref=np.real,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Skip since real and imag don't have out variants.
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo(
"roll",
ref=np.roll,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
error_inputs_func=error_inputs_roll,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_roll,
decorators=(onlyNativeDeviceTypes,),
),
OpInfo(
"rot90",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
error_inputs_func=error_inputs_rot90,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_rot90,
),
    # To test reference numerics against multiple values of the argument `decimals`,
    # we make multiple OpInfo entries, each corresponding to a different value of `decimals`.
UnaryUfuncInfo('round',
ref=np.round,
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True,),
UnaryUfuncInfo('round',
ref=np.round,
variant_test_name='decimals_0',
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}),
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 0}),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
supports_sparse_csr=False),
UnaryUfuncInfo('round',
ref=np.round,
variant_test_name='decimals_3',
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}),
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 3}),
skips=(
# test_ops already tested for this overload with `decimals_0` opinfo entry
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
supports_sparse_csr=False),
UnaryUfuncInfo('round',
ref=np.round,
variant_test_name='decimals_neg_3',
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}),
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': -3}),
skips=(
# test_ops already tested for this overload with `decimals_0` opinfo entry
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=False,
supports_sparse_csr=False),
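    # A hedged note on the `sample_kwargs` lambdas in the `round` variants above: they are
    # assumed to return a pair of kwarg dicts, the first forwarded to the torch operator and
    # the second to the NumPy reference, so for the variant above both amount to
    #     torch.round(x, decimals=-3)  # vs. the reference np.round(x, -3)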
UnaryUfuncInfo('sin',
ref=np.sin,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Fails on CUDA but passes on ROCm
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cdouble,), device_type='cuda'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),
UnaryUfuncInfo('sinc',
ref=np_sinc_with_fp16_as_fp32,
aliases=('special.sinc',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
handles_large_floats=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2,
torch.float16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/49133
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=[torch.cfloat]),
)),
UnaryUfuncInfo('sinh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.cdouble,)),
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('sign',
ref=reference_sign,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
)),
UnaryUfuncInfo('sgn',
ref=reference_sgn,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
# Reference: https://github.com/pytorch/pytorch/issues/53958
                       # The test fails when comparing NaN, as `equal_nan` is True
                       # when comparing the CPU tensors.
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.complex64, torch.complex128]),
# Reference: https://github.com/pytorch/pytorch/issues/48486
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.complex64]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
OpInfo('split',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
sample_inputs_func=partial(sample_inputs_split, list_args=False),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_autodiffed=True),
OpInfo('split',
# Cannot declare this aten_name because of
# test_variant_consistency_jit_split_list_args_cpu_float32
decomp_aten_name='split_with_sizes',
variant_test_name='list_args',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=True),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('split_with_sizes',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
sample_inputs_func=sample_inputs_split_with_sizes,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True),
BinaryUfuncInfo('__radd__',
op=torch.Tensor.__radd__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=['aten::add'],),
BinaryUfuncInfo('__rdiv__',
op=torch.Tensor.__rdiv__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
promotes_int_to_float=True,
lhs_make_tensor_kwargs={'exclude_zero': True},
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
skips=(
# https://github.com/pytorch/pytorch/issues/76806
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),
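    # Informal example: __rdiv__ implements reflected (right-hand) division, e.g. a Python
    # scalar divided by a tensor; it autodiffs as mul + reciprocal, matching the
    # autodiff_nonfusible_nodes listed above:
    # >>> 1 / torch.tensor([2., 4.])
    # tensor([0.5000, 0.2500])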
BinaryUfuncInfo('__rmul__',
op=torch.Tensor.__rmul__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
autodiff_nonfusible_nodes=['aten::mul'],),
BinaryUfuncInfo('__rand__',
op=torch.Tensor.__rand__,
dtypes=integral_types_and(torch.bool),
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
BinaryUfuncInfo('__ror__',
op=torch.Tensor.__ror__,
dtypes=integral_types_and(torch.bool),
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
BinaryUfuncInfo('__rxor__',
op=torch.Tensor.__rxor__,
dtypes=integral_types_and(torch.bool),
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
)),
OpInfo('__rmatmul__',
op=torch.Tensor.__rmatmul__,
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16]
if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_matmul,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
decorators=(
               # NVIDIA only guarantees that bfloat16 is supported by bmm on SM >= 5.3
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater),
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view'),
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_noncontiguous_samples'),
),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu', dtypes=(torch.long,)),
# Fails on XLA.
# AssertionError: False is not true : Tensors failed to compare as equal
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
# https://github.com/pytorch/pytorch/issues/71774
DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
device_type='cpu', dtypes=(torch.long,)),
)),
BinaryUfuncInfo('__rmod__',
op=torch.Tensor.__rmod__,
dtypes=floating_types_and(torch.bfloat16, torch.half,),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_two_python_scalars=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
# Support autograd after torch.remainder(Tensor, Tensor) supports
# autograd of the second argument.
# https://github.com/pytorch/pytorch/pull/58476/files#r637167630
# supports_autograd=False,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::remainder'],),
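    # Informal example: __rmod__ handles `scalar % tensor`, dispatching to torch.remainder
    # with the tensor as the second argument:
    # >>> 5 % torch.tensor([2., 3.])
    # tensor([1., 2.])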
BinaryUfuncInfo('__rpow__',
op=torch.Tensor.__rpow__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
# Reference: https://github.com/pytorch/pytorch/issues/54774
# "log2" "_vml_cpu" not implemented for Half
backward_dtypes=all_types_and_complex_and(torch.bfloat16),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
                        # FIXME: tolerance is too high
DecorateInfo(unittest.skip('Skipped!'), 'TestGradients'),
),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::pow'],),
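    # Informal example: __rpow__ handles `scalar ** tensor`, i.e. torch.pow(scalar, tensor);
    # the Half backward restriction above traces back to the log kernel referenced in the
    # comment (d/dx a**x = a**x * log(a)):
    # >>> 2 ** torch.tensor([1., 2., 3.])
    # tensor([2., 4., 8.])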
BinaryUfuncInfo('__rsub__',
op=torch.Tensor.__rsub__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
supports_two_python_scalars=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::rsub'],),
BinaryUfuncInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
supports_inplace_autograd=False,
assert_autodiffed=None,
sample_inputs_func=sample_inputs_add_sub),
OpInfo('select',
aten_backward_name='select_backward',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
sample_inputs_func=sample_inputs_select,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('select_scatter',
dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_select_scatter,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo('slice_scatter',
dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_slice_scatter,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
UnaryUfuncInfo('signbit',
ref=np.signbit,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False,),
UnaryUfuncInfo('tan',
ref=np.tan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cuda', dtypes=[torch.float64],
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
                   # tan has poles at (pi/2) * odd_number, where reference values blow up to nan/inf
reference_numerics_filter=NumericsFilter(
condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)),
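    # Informal note on the filter above: close_to_int(x / (pi/2)) flags inputs near an
    # integer multiple of pi/2, which covers the odd multiples where tan has poles
    # (e.g. x = 3*pi/2 gives x / (pi/2) = 3.0); the reference-numerics tests substitute
    # safe_val = pi for those elements so the comparison steers clear of the poles.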
UnaryUfuncInfo('tanh',
ref=np.tanh,
aten_backward_name='tanh_backward',
aliases=('nn.functional.tanh',),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
                       # The alias nn.functional.tanh will produce (because of the saved warning string):
                       # "RuntimeError: Expected to not find "tanh" but found it"
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
                   # tanh has poles at j * (pi/2) * odd_number, where reference values blow up to nan/inf
reference_numerics_filter=NumericsFilter(
condition=lambda x: (close_to_int(x / (math.pi * 0.5j))
if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
safe_val=0)),
OpInfo('tensor_split',
ref=np.array_split,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
               # Pre-existing condition; needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
),
sample_inputs_func=sample_inputs_tensor_split,),
OpInfo('hsplit',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_hsplit,
error_inputs_func=error_inputs_hsplit,),
OpInfo('vsplit',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_vsplit,
error_inputs_func=error_inputs_vsplit,),
OpInfo('dsplit',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_dsplit,
error_inputs_func=error_inputs_dsplit,),
OpInfo('triangular_solve',
op=torch.triangular_solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# AssertionError: Scalars are not equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# Gradcheck fails
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',
dtypes=floating_and_complex_types()),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
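    # Informal note: triangular_solve(b, A) takes the triangular matrix as its second
    # argument, hence idx=1 in the gradcheck wrapper above; the wrapper keeps that argument
    # triangular because plain finite differencing would perturb entries outside the
    # triangle and leave the op's input domain.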
UnaryUfuncInfo('trunc',
aliases=('fix', ),
ref=np.trunc,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True),
UnaryUfuncInfo('exp2',
aliases=('special.exp2', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
UnaryUfuncInfo('expm1',
aliases=('special.expm1', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
assert_autodiffed=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('nan_to_num',
ref=np.nan_to_num,
dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
                   # Pass NumPy kwargs via sample_kwargs: NumPy doesn't support BFloat16,
                   # so the reference comparison runs in float and needs explicit posinf/neginf values.
# Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556
sample_kwargs=lambda device, dtype, input: ({},
{'posinf': torch.finfo(torch.bfloat16).max,
'neginf': torch.finfo(torch.bfloat16).min})
if dtype is torch.bfloat16 else ({}, {})),
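    # Informal note: by default nan_to_num maps nan -> 0 and +/-inf to the finfo max/min of
    # the input dtype, e.g. torch.tensor([float('inf')], dtype=torch.bfloat16).nan_to_num()
    # yields torch.finfo(torch.bfloat16).max. The sample_kwargs above hand those same
    # posinf/neginf values to NumPy, which performs the reference comparison in float.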
UnaryUfuncInfo('reciprocal',
ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/45690
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('rsqrt',
ref=lambda x: np.reciprocal(np.sqrt(x)),
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.half: 5e-2}),),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=(torch.cfloat, torch.cdouble)),
# AssertionError: Tensor-likes are not close!
# Greatest absolute difference: nan at index (700,) (up to 0.01 allowed)
# Greatest relative difference: nan at index (700,) (up to 0.001 allowed)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.chalf,)),
)),
UnaryUfuncInfo('sqrt',
ref=np.sqrt,
supports_sparse=True,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_fwgrad_bwgrad=True,
decorators=(
precisionOverride({torch.bfloat16: 7e-2}),
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}),
'TestUnaryUfuncs', 'test_reference_numerics_large'),
),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/47358
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=(torch.cfloat, torch.cdouble),
active_if=IS_MACOS),
# Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('square',
ref=np.square,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/52549
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.cfloat, torch.cdouble]),
# >>> t = torch.tensor(complex(-0.01, float("inf")))
# >>> np.square(t.numpy())
# (-inf-infj)
# >>> t.square()
# tensor(-inf-infj)
# >>> t.cuda().square()
# tensor(inf+nanj, device='cuda:0')
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.bfloat16]),
),),
OpInfo('lerp',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_lerp,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True),
OpInfo('linalg.inv',
aten_name='linalg_inv',
op=torch.linalg.inv,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
# AssertionError: Scalars are not equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('linalg.inv_ex',
aten_name='linalg_inv_ex',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
# AssertionError: Scalars are not equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
UnaryUfuncInfo('angle',
ref=np.angle,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_complex_to_float=True,
skips=(
# Ref: https://github.com/pytorch/pytorch/issues/78413
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_small',
dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64),),
)),
UnaryUfuncInfo('isfinite',
ref=np.isfinite,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_autograd=False),
UnaryUfuncInfo('isinf',
ref=np.isinf,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False),
UnaryUfuncInfo('isposinf',
ref=np.isposinf,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False),
UnaryUfuncInfo('isneginf',
ref=np.isneginf,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False),
UnaryUfuncInfo('isreal',
ref=np.isreal,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
supports_out=False,
supports_autograd=False),
UnaryUfuncInfo('isnan',
ref=np.isnan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_autograd=False),
OpInfo('linalg.solve',
aten_name='linalg_solve',
op=torch.linalg.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('linalg.solve_ex',
aten_name='linalg_solve_ex',
op=torch.linalg.solve_ex,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('linalg.solve_triangular',
aten_name='linalg_solve_triangular',
op=torch.linalg.solve_triangular,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve_triangular,
supports_fwgrad_bwgrad=True,
skips=(skipCPUIfNoLapack,),
           # linalg.solve_triangular cannot be batched over because of a call to out.copy_(result)
supports_forward_ad=True),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
),
),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
),
),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
op=torch.linalg.pinv,
dtypes=floating_and_complex_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_pinv,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
# errors with "leaked XXXX bytes CUDA memory on device 0"
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),)
),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
variant_test_name='singular',
# pinv is Frechet-differentiable in a rank-preserving neighborhood,
# so we feed inputs that are the products of two full-rank factors,
# to avoid any rank changes caused by the perturbations in the gradcheck
op=lambda a, b: torch.linalg.pinv(a @ b.mT),
dtypes=floating_and_complex_types(),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_pinv_singular,
# Only large tensors show issues with implicit backward used prior to
# explicit backward implementation.
decorators=[slowTest, skipCUDAIfNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# CUDA runs out of memory
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad',
device_type='cuda', dtypes=[torch.cdouble]),
# This test takes almost 2 hours to run!
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad',
device_type='cuda', dtypes=[torch.cdouble]),
)),
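    # Informal note on the 'singular' variant above: with, say, a of shape (m, k) and b of
    # shape (n, k), a @ b.mT has rank at most k, and the small perturbations gradcheck
    # applies to a and b do not change that rank, so pinv stays Frechet-differentiable
    # throughout the check.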
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)
),
OpInfo('eig',
op=torch.eig,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_eig,
error_inputs_func=error_inputs_eig,
decorators=[
skipCUDAIfNoMagma,
skipCPUIfNoLapack,
],
),
OpInfo('einsum',
# we need this lambda because SampleInput expects tensor input as the first argument
# TODO(@heitorschueroff) update SampleInput to handle such cases
op=lambda tensors, equation: torch.einsum(equation, tensors),
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,
*[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16]
if ((SM60OrLater and CUDA11OrLater)
or TEST_WITH_ROCM) else []),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
# See https://github.com/pytorch/pytorch/issues/66357
sample_inputs_func=sample_inputs_einsum,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# test does not work with passing lambda for op
# there's a test `test_einsum` in `test_jit.py` to handle this case
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
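    # Informal note: the lambda above takes (tensors, equation) so the tensor argument comes
    # first, as SampleInput requires, and then forwards to the real signature where the
    # equation leads, e.g. torch.einsum('ij,jk->ik', a, b) computes the matrix product of a and b.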
OpInfo('svd',
op=torch.svd,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_svd,
# Runs very slowly on slow-gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
# We're using at::allclose, which does not have a batching rule
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('linalg.svd',
op=torch.linalg.svd,
aten_name='linalg_svd',
decomp_aten_name='_linalg_svd',
dtypes=floating_and_complex_types(),
# Runs very slowly on slow-gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
check_batched_forward_grad=False,
# We're using at::allclose, which does not have a batching rule
check_batched_grad=False,
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_svd,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('linalg.svdvals',
op=torch.linalg.svdvals,
aten_name='linalg_svdvals',
decomp_aten_name='_linalg_svd',
dtypes=floating_and_complex_types(),
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
# We're using at::allclose, which does not have a batching rule
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_svdvals,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off]),
OpInfo('svd_lowrank',
op=lambda *args, **kwargs: wrapper_set_seed(
lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs),
*args, **kwargs
),
dtypes=floating_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_svd_lowrank,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off,
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}),
'TestCommon', 'test_noncontiguous_samples',
device_type='cuda')],
skips=(
# test does not work with passing lambda for op
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
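    # Informal note: svd_lowrank uses random projections, so the op above is wrapped in
    # wrapper_set_seed for reproducibility, and the input is constructed as a @ b.mT so the
    # decomposed matrix has rank bounded by the factors' inner dimension.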
OpInfo('pca_lowrank',
op=lambda *args, **kwargs: wrapper_set_seed(
lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs),
*args, **kwargs
),
dtypes=floating_types(),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
check_batched_forward_grad=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_pca_lowrank,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off,
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}),
'TestCommon', 'test_noncontiguous_samples',
device_type='cuda')],
skips=(
# test does not work with passing lambda for op
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
BinaryUfuncInfo('polar',
dtypes=floating_types(),
                    # this function is undefined when the 'abs' (magnitude) values are < 0
supports_forward_ad=True,
lhs_make_tensor_kwargs=dict(low=0),
supports_rhs_python_scalar=False,
skips=(
# RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument
DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'),
# GradcheckError: Jacobian computed with forward mode mismatch for output 0 with respect to input 0
# Numerical:
# tensor([[0.]], dtype=torch.float64)
# Analytical:
# tensor([[-0.0047]], dtype=torch.float64, grad_fn=<CopySlices>)
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'),
)),
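    # Informal example: polar(abs, angle) builds abs * (cos(angle) + 1j * sin(angle)), which
    # is why lhs_make_tensor_kwargs above constrains the magnitude input to low=0:
    # >>> torch.polar(torch.tensor([2.]), torch.tensor([0.]))
    # tensor([2.+0.j])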
# TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.
# To test reference numerics against multiple values of argument `n`,
    # we make multiple OpInfo entries, each corresponding to a different value of n (currently 0 to 4).
# We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
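    # Informal example: polygamma(n, x) is the n-th derivative of the digamma function, so
    # n=0 is digamma itself; the lambda above swaps the arguments because the sample inputs
    # supply the tensor first:
    # >>> torch.polygamma(0, torch.tensor([1.]))
    # tensor([-0.5772])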
# A separate OpInfo entry for special.polygamma is needed to reorder the arguments
# for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939
UnaryUfuncInfo('special.polygamma',
op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs),
variant_test_name='special_polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_1',
ref=reference_polygamma if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_2',
ref=reference_polygamma if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_3',
ref=reference_polygamma if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),),
sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_4',
ref=reference_polygamma if TEST_SCIPY else None,
decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
OpInfo('ravel',
ref=np.ravel,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_ravel,
),
OpInfo('reshape',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=partial(sample_inputs_view_reshape, transpose_samples=True),
reference_inputs_func=partial(reference_inputs_view_reshape, transpose_samples=True),
error_inputs_func=error_inputs_reshape,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo('reshape_as',
op=lambda x, other: x.reshape_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_view_as_reshape_as,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
)),
OpInfo('view',
op=lambda x, shape: x.view(shape),
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
sample_inputs_func=partial(sample_inputs_view_reshape, transpose_samples=False),
reference_inputs_func=partial(reference_inputs_view_reshape, transpose_samples=False),
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
)),
OpInfo('view_as',
op=lambda x, other: x.view_as(other),
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_view_as_reshape_as,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
)),
OpInfo('atleast_1d',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_atleast1d2d3d,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
),
OpInfo('atleast_2d',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_atleast1d2d3d,
),
OpInfo('atleast_3d',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_atleast1d2d3d,
),
OpInfo('flatten',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
ref=reference_flatten,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_flatten,
reference_inputs_func=reference_inputs_flatten,
),
OpInfo('unflatten',
op=torch.unflatten,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_unflatten,
),
OpInfo('column_stack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),),
sample_inputs_func=sample_inputs_column_stack,),
OpInfo('pinverse',
op=torch.pinverse,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
device_type='mps', dtypes=[torch.float32]),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
device_type='mps', dtypes=[torch.float32]),
)),
OpInfo('gather',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_gather,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
error_inputs_func=error_inputs_gather,
),
OpInfo('index_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index),
OpInfo('index_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
),
sample_inputs_func=sample_inputs_index,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_select',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_index,
error_inputs_func=error_inputs_index_select,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_add',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_reduce',
dtypes=all_types_and(torch.float16, torch.bfloat16),
supports_out=True,
skips=(
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
),
sample_inputs_func=sample_inputs_index_reduce),
OpInfo('__getitem__',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_inplace_autograd=False,
supports_scripting=False,
op=torch.Tensor.__getitem__,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 104448
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),),
sample_inputs_func=sample_inputs_getitem),
OpInfo('index_put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_inplace_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
test_neg_view=False,
sample_inputs_func=sample_inputs_index_put,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: The following operation failed in the TorchScript interpreter.
# Traceback of TorchScript (most recent call last):
# File "<string>", line 3, in forward
# def the_method(i0, i1: List[torch.Tensor], i2):
# return torch.index_put(i0, i1, i2, accumulate=False)
# ~~~~~~~~~~~~~~~ <--- HERE
# RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('sort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_sort,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
)),
OpInfo('unique',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
sample_inputs_func=sample_inputs_unique,
supports_out=False,
supports_autograd=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               # Reference: https://github.com/pytorch/pytorch/issues/76571
DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values',
dtypes=(torch.float16, torch.float32, torch.float64)),
)),
OpInfo('unique_consecutive',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
sample_inputs_func=sample_inputs_unique_consecutive,
supports_out=False,
supports_autograd=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               # Reference: https://github.com/pytorch/pytorch/issues/76571
DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values',
dtypes=(torch.float16, torch.float32, torch.float64)),
)),
OpInfo('put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
check_batched_gradgrad=False, # vmap complains of the sizes
sample_inputs_func=sample_inputs_put),
OpInfo('take',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
check_batched_grad=False, # vmap complains of the sizes
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_take,
error_inputs_func=error_inputs_take),
OpInfo('scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_scatter,
error_inputs_func=error_inputs_scatter_and_scatter_add),
UnaryUfuncInfo(
'bfloat16',
op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
)),
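    # Informal note: this and the following entries exercise the Tensor.<dtype>() conversion
    # methods (bool, byte, char, double, float, half, int, long, short, chalf) through
    # lambdas; because the output dtype differs from the input dtype, most of them disable
    # or skip the autograd tests, e.g. torch.tensor([1.0]).bfloat16().dtype is torch.bfloat16.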
UnaryUfuncInfo(
'bool',
op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'byte',
op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'char',
op=lambda x, *args, **kwargs: x.char(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'double',
op=lambda x, *args, **kwargs: x.double(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'float',
op=lambda x, *args, **kwargs: x.float(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'half',
op=lambda x, *args, **kwargs: x.half(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=True,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'int',
op=lambda x, *args, **kwargs: x.int(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'long',
op=lambda x, *args, **kwargs: x.long(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'short',
op=lambda x, *args, **kwargs: x.short(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
UnaryUfuncInfo(
'chalf',
op=lambda x, *args, **kwargs: x.chalf(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestGradients'),
# use of lambda doesn't work with test_normalize_operator_exhaustive
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager',
device_type='cpu'),
# TypeError: 'int' object is not iterable
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view',
device_type='cpu'),
# RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view',
device_type='cpu'),
# RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
# RuntimeError: "neg_conj_cuda" not implemented for 'ComplexHalf'
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
)
),
OpInfo('empty_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
reference_inputs_func=reference_inputs_like_fns,
supports_autograd=False,
skips=(
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"),
"TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'),
DecorateInfo(unittest.skip("Expected: empty_like is not comparable"), 'TestCompositeCompliance',
'test_operator'),
)),
OpInfo('zeros_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
)),
OpInfo('ones_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
)),
OpInfo('randn_like',
dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128),
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.randn_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
supports_sparse_csr=True,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"),
'TestCommon', 'test_complex_half_reference_testing'),
)),
OpInfo('rand_like',
dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128),
op=lambda inp, *args, **kwargs:
               wrapper_set_seed(torch.rand_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               DecorateInfo(unittest.skip("Expected: rand_like is not comparable between dtypes"),
'TestCommon', 'test_complex_half_reference_testing'),
)),
OpInfo('randint_like',
dtypes=all_types_and(torch.half, torch.bfloat16),
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.randint_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_randint_like,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('full_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_full_like,
supports_autograd=False,
skips=(
)),
OpInfo('new_zeros',
op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
),
supports_autograd=False),
OpInfo('new_ones',
op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
),
supports_autograd=False),
OpInfo('new_empty',
op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'),
DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCompositeCompliance',
'test_operator'),
DecorateInfo(unittest.skip("Expected: new_empty is not comparable"),
'TestCommon', 'test_complex_half_reference_testing'),
),
supports_autograd=False),
OpInfo('empty',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_empty,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance',
'test_operator'),
# requires_grad doesn't exist in the jit schema
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestLazyOpInfo'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon', 'test_complex_half_reference_testing'),
)),
OpInfo('new_full',
op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
supports_out=False,
sample_inputs_func=sample_inputs_new_full,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
),
supports_autograd=False),
OpInfo('multinomial',
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.multinomial, inp, *args, **kwargs),
method_variant=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
supports_out=True,
sample_inputs_func=sample_inputs_multinomial,
error_inputs_func=error_inputs_multinomial,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Strides are not the same!
# This may not be reproducible in CI
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning')),
supports_autograd=False),
OpInfo('normal',
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.normal, inp, *args, **kwargs),
# The inplace variant (Tensor.normal_) is different from torch.normal
inplace_variant=None,
dtypes=floating_types_and(torch.bfloat16, torch.half),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),
supports_out=True,
sample_inputs_func=sample_inputs_normal_tensor_first,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Tensor-likes are not close!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes
DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestGradients'),)),
OpInfo('normal',
# This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here
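           # Illustrative only (the values are placeholders, not used by the tests below):
           # torch.normal(0.0, torch.ones(3)) takes a Python-number mean and a tensor std,
           # so the lambda below re-orders (std, mean) back into torch.normal(mean, std).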
variant_test_name='number_mean',
op=lambda std, mean, *args, **kwargs:
wrapper_set_seed(torch.normal, mean, std, *args, **kwargs),
# The inplace variant (Tensor.normal_) is different from torch.normal
inplace_variant=None,
dtypes=floating_types_and(torch.bfloat16, torch.half),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),
supports_out=True,
sample_inputs_func=sample_inputs_normal_tensor_second,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# NotImplementedError not raised
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'),
# Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes
DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestGradients'),)),
OpInfo('bernoulli',
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs),
# The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli
inplace_variant=None,
method_variant=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),
supports_out=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_bernoulli,
skips=(
# vmap: We do not yet support calling random operations inside of vmap
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Expected RuntimeError when doing an unsafe cast from a result of
                   # dtype torch.float32 into an out= with dtype torch.long
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'))),
OpInfo('scatter_add',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_add,
error_inputs_func=error_inputs_scatter_and_scatter_add,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo('stack',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_stack,
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# https://github.com/pytorch/pytorch/issues/77046
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
),
),
OpInfo('hstack',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
error_inputs_func=error_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
BinaryUfuncInfo('hypot',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_rhs_python_scalar=False),
OpInfo('histogram',
dtypes=floating_types(),
dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU
sample_inputs_func=sample_inputs_histogram,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0):
# return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False)
# ~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Not Implemented on XLA.
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla'),
)),
OpInfo('histogramdd',
dtypes=floating_types(),
dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU
sample_inputs_func=sample_inputs_histogramdd,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('histc',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64),
sample_inputs_func=sample_inputs_histc,
supports_out=True,
supports_autograd=False,
skips=(
# CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor
# "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast
# from a result of dtype torch.float32 into an out= with dtype torch.long"
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'),
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
)),
OpInfo('bincount',
dtypes=integral_types_and(),
sample_inputs_func=sample_inputs_bincount,
supports_out=False,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('bucketize',
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_bucketize,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('searchsorted',
dtypes=all_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_searchsorted,
supports_autograd=False,
ref=reference_searchsorted,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('cat',
ref=_cat_np,
aliases=('concat',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32),
sample_inputs_func=sample_inputs_cat_concat,
reference_inputs_func=reference_inputs_cat,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
skips=(
# RuntimeError: Arguments for call not valid.
# Expected a value of type 'List[Tensor]' for argument
# 'tensors' but instead found type 'Tensor (inferred)'.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
# see https://github.com/pytorch/pytorch/issues/71286
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'),)),
OpInfo('unbind',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
ref=reference_unbind,
sample_inputs_func=sample_inputs_unbind,
error_inputs_func=error_inputs_unbind,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_gradgrad=True,
supports_out=False,
),
OpInfo('vstack',
aliases=('row_stack',),
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
error_inputs_func=error_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# RuntimeError: _fn() Expected a value of type
# 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),
OpInfo('dstack',
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
error_inputs_func=error_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
),
OpInfo('unfold',
op=lambda x, *args: x.unfold(*args),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Skip operator schema test because this is a functional and not an operator
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
sample_inputs_func=sample_inputs_unfold),
OpInfo('msort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
check_batched_gradgrad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_msort,
skips=(
)),
OpInfo('movedim',
aliases=('moveaxis',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_movedim_moveaxis),
OpInfo('renorm',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_renorm,
error_inputs_func=error_inputs_renorm),
ShapeFuncInfo('repeat',
op=lambda x, dims: x.repeat(dims),
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_repeat_tile,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
)),
OpInfo('squeeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_squeeze),
UnaryUfuncInfo(
'fill',
op=_fill_aten,
ref=_fill_np,
method_variant=None,
inplace_variant=torch.Tensor.fill_,
sample_kwargs=_fill_sample_kwargs,
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'value': True}),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
skips=(
# JIT has issue when op is passed as lambda
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("No fill_ op"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("No fill_ op"), 'TestNNCOpInfo'),
)),
OpInfo('resize_',
op=lambda x, shape: x.clone().resize_(shape),
method_variant=None,
inplace_variant=torch.Tensor.resize_,
# the test fails because resize_ doesn't work with imag views as expected by the test
# https://github.com/pytorch/pytorch/issues/65945
test_neg_view=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
skips=(
# Cannot resize variables that require grad
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'),
),
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('resize_as_',
op=lambda x, other: torch.resize_as_(x.clone(), other),
method_variant=None,
inplace_variant=torch.Tensor.resize_as_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
skips=(
# Cannot resize variables that require grad
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
               DecorateInfo(unittest.skip('Allowed exception'), 'TestCompositeCompliance', 'test_operator'),
),
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('take_along_dim',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_take_along_dim,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
ShapeFuncInfo('tile',
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_repeat_tile),
OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid'
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_trapezoid),
OpInfo('trapezoid',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_trapezoid),
OpInfo('cumulative_trapezoid',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
supports_out=False,
sample_inputs_func=sample_cumulative_trapezoid,),
OpInfo('unsqueeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
sample_inputs_func=sample_unsqueeze),
BinaryUfuncInfo('xlogy',
aliases=('special.xlogy',),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
promotes_int_to_float=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_one_python_scalar=True,
skips=(
# nan vs nan comparisons
# https://github.com/pytorch/pytorch/issues/74279
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
)),
OpInfo('zero_',
op=lambda x: torch.zero_(x.clone()),
method_variant=None,
inplace_variant=torch.Tensor.zero_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_gradgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_zero_),
BinaryUfuncInfo('special.xlog1py',
aten_name='special_xlog1py',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
backward_dtypes=all_types_and(torch.bool, torch.bfloat16),
backward_dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
promotes_int_to_float=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_one_python_scalar=True,
skips=(
# nan vs 0 comparisons
# https://github.com/pytorch/pytorch/issues/74279
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
)),
BinaryUfuncInfo('special.zeta',
aten_name='special_zeta',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
supports_autograd=False,
supports_one_python_scalar=True),
# TODO: FIXME
# OpInfo entry to verify the gradient formula of `other`/`q`
# BinaryUfuncInfo('special.zeta',
# op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs),
# aten_name='special_zeta',
# variant_test_name='grad',
# dtypes=all_types_and(torch.bool),
# promotes_int_to_float=True,
# supports_autograd=True,
# supports_rhs_python_scalar=False,
# decorators=[
# # Derivative wrt first tensor not implemented
# DecorateInfo(unittest.expectedFailure, "TestCommon",
# "test_floating_inputs_are_differentiable")
# ],
# skips=(
# # Lambda doesn't work in JIT test
# # AssertionError: JIT Test does not execute any logic
# DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),
# )),
OpInfo('logsumexp',
aliases=('special.logsumexp',),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
gradcheck_fast_mode=False,
sample_inputs_func=sample_inputs_logsumexp),
OpInfo('trace',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
error_inputs_func=error_inputs_trace,
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_trace),
OpInfo('transpose',
ref=_numpy_ref_transpose,
aliases=('swapdims', 'swapaxes'),
assert_jit_shape_analysis=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
sample_inputs_func=sample_inputs_transpose_swapdims),
OpInfo('T',
op=lambda x: x.T,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_T),
OpInfo('H',
op=lambda x: x.H,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_T),
OpInfo('mT',
op=lambda x: x.mT,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_adjoint),
OpInfo('mH',
op=lambda x: x.mH,
aliases=('adjoint',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_adjoint),
OpInfo('tril',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('triu',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('kron',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_kron),
OpInfo('inner',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16]
if (CUDA11OrLater or TEST_WITH_ROCM) else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_inner,
),
OpInfo('tensordot',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16]
if (CUDA11OrLater or TEST_WITH_ROCM) else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_tensordot,
skips=(
# Skip operator schema test because this is a functional and not an operator.
# Reference: https://github.com/pytorch/pytorch/issues/54574
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)
),
OpInfo('to_sparse',
op=lambda x, *args: x.to_sparse(*args),
sample_inputs_func=sample_inputs_to_sparse,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
backward_dtypes=floating_types(),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_sparse_csr=True,
supports_sparse_csc=True,
check_batched_grad=False,
check_batched_gradgrad=False,
skips=(
# to_sparse does not support automatic differentiation for outputs with complex dtype
DecorateInfo(unittest.expectedFailure, 'TestGradients',
'test_nondifferentiable', dtypes=(torch.cdouble,)),
# NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend
DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'),
# TODO: FIXME: complex inputs requiring grad error in forward
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Allowed exception: sparse tensors don't have strides
DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'),
DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_backward'),
DecorateInfo(unittest.skip("Allowed exception"), 'TestTags', 'test_tags'),
               # TODO: implement csr.to_sparse(sample_dim) where sample_dim is 1.
DecorateInfo(unittest.skip("csr.to_sparse(1) not implemented. Skipped!"),
'TestSparseCSR', 'test_sparse_csr_consistency'),
)
),
OpInfo('logcumsumexp',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
backward_dtypes=floating_types_and(torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.bfloat16),
skips=(
# AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'),
),
sample_inputs_func=sample_inputs_logcumsumexp,
error_inputs_func=error_inputs_logcumsumexp),
UnaryUfuncInfo('sigmoid',
aliases=('special.expit', 'nn.functional.sigmoid'),
aten_backward_name='sigmoid_backward',
ref=reference_sigmoid if TEST_SCIPY else None,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.complex64: 1e-1,
torch.bfloat16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/56012
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.complex64, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.chalf, torch.complex64, torch.cdouble]),
                       # The alias nn.functional.sigmoid will produce (because the warning string is saved):
# "RuntimeError: Expected to not find "sigmoid" but found it"
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping')),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
                   # sigmoid(z) = 1 / (1 + exp(-z)); at z = j * pi * odd_number,
                   # exp(-z) = -1, so the denominator is zero
reference_numerics_filter=NumericsFilter(
condition=lambda x: (close_to_int(x / (math.pi * 1j))
if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
safe_val=0)),
UnaryUfuncInfo('digamma',
ref=scipy.special.digamma if TEST_SCIPY else None,
aliases=('special.psi', 'special.digamma',),
decorators=(precisionOverride({torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
UnaryUfuncInfo('special.entr',
ref=scipy.special.entr if TEST_SCIPY else None,
aten_name='special_entr',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(precisionOverride({torch.float16: 1e-1,
torch.bfloat16: 1e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.bfloat16, torch.float16]),
),
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_entr),
UnaryUfuncInfo('special.ndtri',
ref=scipy.special.ndtri if TEST_SCIPY else None,
domain=(0, 1),
aten_name='special_ndtri',
dtypes=all_types_and(torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
UnaryUfuncInfo('special.log_ndtr',
aten_name='special_log_ndtr',
ref=scipy.special.log_ndtr if TEST_SCIPY else None,
dtypes=all_types_and(torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
UnaryUfuncInfo('erf',
ref=scipy.special.erf if TEST_SCIPY else None,
aliases=('special.erf', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
UnaryUfuncInfo('erfc',
ref=scipy.special.erfc if TEST_SCIPY else None,
aliases=('special.erfc', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
UnaryUfuncInfo('erfinv',
ref=scipy.special.erfinv if TEST_SCIPY else None,
aliases=('special.erfinv', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2,
torch.float32: 1e-4}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_sparse_csr=True,
supports_sparse_csc=True,
supports_sparse_bsr=True,
supports_sparse_bsc=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
domain=(-1, 1),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < "1.4.0"),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < "1.4.0"),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < "1.4.0"),
)),
OpInfo("nn.functional.smooth_l1_loss",
ref=reference_smooth_l1_loss,
sample_inputs_func=sample_inputs_smooth_l1_loss,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
backward_dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED
# at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),)),
OpInfo(
"nn.functional.l1_loss",
ref=loss_reference_reduction_wrapper(lambda input, target: np.abs(input - target)),
sample_inputs_func=sample_inputs_l1_loss,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED
# at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch.
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
UnaryUfuncInfo('lgamma',
ref=reference_lgamma if TEST_SCIPY else None,
aliases=('special.gammaln', ),
decorators=(precisionOverride({torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
device_type='cpu', dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
),
                   # lgamma has multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
OpInfo(
'logdet',
dtypes=floating_and_complex_types(),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]),
    # `log_softmax` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
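    # A minimal illustration of the two call patterns (not part of the OpInfo database;
    # `x` stands for an arbitrary floating-point tensor):
    #   torch.log_softmax(x, dim=-1)                       # result keeps x.dtype
    #   torch.log_softmax(x, dim=-1, dtype=torch.float64)  # x is cast to float64 first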
OpInfo(
'log_softmax',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=True,
aten_backward_name='_log_softmax_backward_data',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo(
'log_softmax',
variant_test_name='dtype',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
supports_forward_ad=True,
assert_autodiffed=True),
UnaryUfuncInfo('logit',
aten_backward_name='logit_backward',
ref=scipy.special.logit if TEST_SCIPY else None,
domain=(0, 1),
aliases=('special.logit', ),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_logit),
OpInfo('where',
           # Currently only the `input` is tested in gradcheck.
           # If we pass `condition` first, none of the inputs that support
           # autograd would be tested. Hence the following lambda.
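           # Illustrative semantics (the names `cond`, `a` and `b` are placeholders):
           # torch.where(cond, a, b) picks elements of `a` where `cond` is True and of `b`
           # elsewhere, so the lambda puts the differentiable tensor first, which is the
           # argument the gradcheck machinery exercises.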
op=lambda self, condition, other: torch.where(condition, self, other),
ref=lambda self, condition, other: np.where(condition, self, other),
sample_inputs_func=sample_inputs_where,
reference_inputs_func=reference_inputs_where,
error_inputs_func=error_inputs_where,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=(
DecorateInfo(onlyCUDA, "TestCommon", 'test_errors'),),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf)),
OpInfo('nonzero',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
sample_inputs_func=sample_inputs_nonzero,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# nonzero(): argument 'out' must be Tensor, not tuple
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# https://github.com/pytorch/pytorch/issues/67458
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# nonzero is not raising a warning when the out is resized
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
    # The following tests are for jiterator's Python interface.
    # Jiterator can be used to author elementwise CUDA kernels.
    # jiterator._create_jit_fn returns a callable that behaves like a regular pytorch op.
    # See create_jit_fn in jiterator.py for more information.
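    # A minimal usage sketch (illustrative only; the kernel string and the name
    # `square_plus` are invented for this comment and are not used by the tests below):
    #   square_plus = torch.cuda.jiterator._create_jit_fn(
    #       "template <typename T> T square_plus(T x) { return x * x + x; }")
    #   out = square_plus(torch.randn(8, device='cuda'))  # called like a regular op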
UnaryUfuncInfo(
'jiterator_unary',
op=torch.cuda.jiterator._create_jit_fn("template <typename T> T unary(T x) { return x * x + x; }"),
ref=lambda x: x * x + x,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
supports_out=False,
        supports_autograd=False,  # jiterator ops don't have backward defined
decorators=[
onlyCUDA,
skipCUDAIfRocm,
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestUnaryUfuncs', 'test_reference_numerics_hard'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestUnaryUfuncs', 'test_reference_numerics_normal'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestUnaryUfuncs', 'test_reference_numerics_small'),
],
skips=(
            # Jiterator ops don't support neg or conj views
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
            # Jiterator ops don't support CompositeCompliantTensor
            # The following test should be expectedFailure, but it's causing cascading failures in CUDA, thus skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Skip reference_numerics tests for bool type, as the defined function doesn't work for bool
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bool]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bool]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bool]),
# Expected failure: torch.jiterator_unary is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
BinaryUfuncInfo(
'jiterator_binary',
op=torch.cuda.jiterator._create_jit_fn(
"template <typename T> T binary(T x, T y, T alpha) { return x + alpha * y; }", alpha=1),
ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \
else np.add(input, np.multiply(alpha, other)),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-3.14),
supports_out=False,
        supports_autograd=False,  # jiterator ops don't have backward defined
supports_rhs_python_scalar=False,
decorators=[onlyCUDA, skipCUDAIfRocm],
skips=(
            # Jiterator ops don't support neg or conj views
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
            # Jiterator ops don't support CompositeCompliantTensor
            # The following test should be expectedFailure, but it's causing cascading failures in CUDA, thus skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Expected failure: torch.jiterator_binary is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
OpInfo(
'jiterator_4inputs_with_extra_args',
op=torch.cuda.jiterator._create_jit_fn(
"template <typename T> T binary(T i0, T i1, T i2, T i3, T alpha, T beta) { return alpha * i0 + beta * i1 + i2 + i3; }",
alpha=1, beta=1),
ref=lambda i0, i1, i2, i3, *, alpha=1, beta=1: alpha * i0 + beta * i1 + i2 + i3,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=4, alpha=3.14, beta=-4.20),
supports_out=False,
        supports_autograd=False,  # jiterator ops don't have backward defined
decorators=[onlyCUDA, skipCUDAIfRocm],
skips=(
            # Jiterator ops don't support neg or conj views
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
            # Jiterator ops don't support CompositeCompliantTensor
            # The following test should be expectedFailure, but it's causing cascading failures in CUDA, thus skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
BinaryUfuncInfo(
'jiterator_binary_return_by_ref',
op=torch.cuda.jiterator._create_multi_output_jit_fn(
"""
template <typename T>
void binary_return_by_ref(T i0, T i1, T& out0) {
out0 = i0 + i1;
}
""",
num_outputs=1),
ref=lambda i0, i1: i0 + i1,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42),
supports_out=False,
supports_autograd=False, # jiterator ops don't have a backward defined
supports_rhs_python_scalar=False,
decorators=[onlyCUDA, skipCUDAIfRocm],
skips=(
# Jiterator ops don't support neg or conj views
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Jiterator ops don't support CompositeCompliantTensor
# The following test should be an expectedFailure, but it causes cascading failures on CUDA, so it is skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Expected failure: torch.jiterator_binary_return_by_ref is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
OpInfo(
'jiterator_2inputs_2outputs',
op=torch.cuda.jiterator._create_multi_output_jit_fn(
"""
template <typename T>
void binary_2outputs(T i0, T i1, T& out0, T& out1) {
out0 = i0 + i1;
out1 = i0 - i1;
}
""",
num_outputs=2),
ref=lambda i0, i1, *, alpha=1: (i0 + i1, i0 - i1),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2),
supports_out=False,
supports_autograd=False, # jiterator ops don't have a backward defined
decorators=[onlyCUDA, skipCUDAIfRocm],
skips=(
# Jiterator ops don't support neg or conj views
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Jiterator ops don't support CompositeCompliantTensor
# The following test should be an expectedFailure, but it causes cascading failures on CUDA, so it is skipped
DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
# Expected failure: torch.jiterator_2inputs_2outputs is not a valid op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Skip Nvfuser
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
)
),
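# Illustrative sketch for the multi-output variant used above:
# torch.cuda.jiterator._create_multi_output_jit_fn takes a kernel that writes its results
# through reference parameters and returns the outputs as a sequence (the exact return
# container is an implementation detail). Roughly, assuming a CUDA device:
#
#   fn = torch.cuda.jiterator._create_multi_output_jit_fn(
#       "template <typename T> void add_sub(T i0, T i1, T& out0, T& out1) { out0 = i0 + i1; out1 = i0 - i1; }",
#       num_outputs=2)
#   out0, out1 = fn(torch.rand(3, device='cuda'), torch.rand(3, device='cuda'))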
# `torch.norm` has multiple code paths depending on the value of `p`.
# These paths have different dtype support, and the JIT supports most
# variants but not all of them, so we split the OpInfo entries for `norm`
# based on the code paths and JIT support.
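# For orientation, the variants below map roughly onto these call patterns
# (dtype and JIT support differ between them):
#
#   x = torch.randn(3, 3)
#   torch.norm(x)                   # default p='fro' path (plain entry below)
#   torch.norm(x, p='nuc')          # nuclear norm ('nuc' variant, needs LAPACK/MAGMA)
#   torch.norm(x, p='fro')          # Frobenius norm ('fro' variant)
#   torch.norm(x, p=float('inf'))   # infinity norm ('inf' variant)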
OpInfo(
"norm",
sample_inputs_func=sample_inputs_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast from a result
# of dtype torch.float32 into an out= with dtype torch.long
DecorateInfo(
unittest.expectedFailure,
"TestCommon",
"test_out",
device_type="meta",
),
),
),
OpInfo('norm',
variant_test_name='nuc',
aten_name='nuclear_norm',
sample_inputs_func=sample_inputs_norm_nuc,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
check_batched_gradgrad=False,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients
# got: Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types(),
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# RuntimeError:
# Arguments for call are not valid.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950
)
),
OpInfo('norm',
variant_test_name='fro',
aten_name='frobenius_norm',
sample_inputs_func=sample_inputs_norm_fro,
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients
# got: Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# Arguments for call are not valid.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950
)),
OpInfo(
"norm",
variant_test_name="inf",
sample_inputs_func=sample_inputs_norm_inf,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# fast gradcheck produces NaNs
gradcheck_fast_mode=False,
skips=(
# AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast from a result
# of dtype torch.float32 into an out= with dtype torch.long
DecorateInfo(
unittest.expectedFailure,
"TestCommon",
"test_out",
device_type="meta",
),
),
),
OpInfo('t',
sample_inputs_func=sample_inputs_t,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
# vmap does not support inplace views
check_inplace_batched_forward_grad=False,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
assert_autodiffed=True,
error_inputs_func=error_inputs_t),
UnaryUfuncInfo('special.erfcx',
ref=scipy.special.erfcx if TEST_SCIPY else None,
aten_name='special_erfcx',
decorators=(toleranceOverride({torch.float32: tol(atol=0, rtol=4e-6), }),),
dtypes=all_types_and(torch.bool),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True),
OpInfo(
"nn.functional.dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Probably because we have used a lambda for the op here
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# the inplace variant dispatches to the dropout kernel, while on CUDA
# the op dispatches to _fused_dropout (under a few more conditions),
# hence the different values and this skip
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
sample_inputs_func=sample_inputs_dropout,
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)),
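# Note on the wrapper used above: wrapper_set_seed (defined elsewhere in this file)
# reseeds the RNG before calling the op so that random ops such as dropout produce
# comparable values across the repeated invocations a test makes. A rough sketch of
# the idea (the real helper may do slightly more, e.g. freezing RNG state):
#
#   def wrapper_set_seed(op, *args, **kwargs):
#       torch.manual_seed(42)
#       return op(*args, **kwargs)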
OpInfo(
"nn.functional.dropout2d",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
check_batched_forward_grad=False,
# As per the docs, valid input dims are (3, 4)
sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(3, 4)),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.dropout3d",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
check_batched_forward_grad=False,
# As per the docs, valid input dims are (4, 5)
sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(4, 5)),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs, inplace=True)),
# In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype,
# unlike when `train=False`, where complex inputs are supported; hence two OpInfos to cover both cases
OpInfo(
"nn.functional.feature_alpha_dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs),
variant_test_name="with_train",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got:
# vmap: We do not yet support calling random operations inside of vmap.
# Please perform random operations outside of vmap as a workaround
DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_forward_mode_AD"),
DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_forward_mode_AD"),),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
# As per the docs, valid input dims are (4, 5)
sample_inputs_func=partial(sample_inputs_dropout, train=True, valid_input_dim=(4, 5)),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.feature_alpha_dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs),
variant_test_name="without_train",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
sample_inputs_func=partial(sample_inputs_dropout, train=False),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.one_hot",
ref=reference_one_hot,
supports_out=False,
dtypes=_dispatch_dtypes((torch.int64,)),
sample_inputs_func=sample_inputs_one_hot,
),
OpInfo(
"nn.functional.embedding",
aten_backward_name="embedding_dense_backward",
# We use a lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests (see the illustrative note after this entry).
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_embedding,
error_inputs_func=error_inputs_embedding,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Reference: https://github.com/pytorch/pytorch/issues/67084
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),
# Not a problem: embedding mutates its weight in-place (it renormalizes when max_norm is set)
DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'),
),
supports_expanded_weight=True,
supports_out=False,
),
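# Illustrative note on the lambda above: torch.nn.functional.embedding takes
# (input_indices, weight), but gradient tests only differentiate the SampleInput's
# `input` field, which here holds the differentiable weight, so the lambda swaps
# the arguments. Roughly:
#
#   weight = torch.randn(10, 3, requires_grad=True)
#   idx = torch.tensor([1, 2, 4, 5])
#   op(weight, idx)  # equivalent to torch.nn.functional.embedding(idx, weight)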
OpInfo(
"nn.functional.embedding_bag",
# We use a lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests.
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs),
dtypes=floating_types_and(torch.float16),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
# backward is not supported for mode `max` and dtype `bfloat16`
backward_dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_embedding_bag,
skips=(
# lambda impl
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Not a problem: embedding_bag mutates its weight in-place (it renormalizes when max_norm is set)
DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
supports_gradgrad=False,
),
UnaryUfuncInfo(
"nn.functional.softplus",
aten_backward_name='softplus_backward',
ref=reference_softplus,
sample_kwargs=lambda device, dtype, input: ({'beta': 3, 'threshold': .2}, {'beta': 3, 'threshold': .2}),
sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'beta': 3, 'threshold': .2}),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
decorators=(
DecorateInfo(
toleranceOverride
({
torch.half: tol(atol=1e-2, rtol=1e-2),
torch.bfloat16: tol(atol=1e-2, rtol=1e-2),
}),
'TestUnaryUfuncs'),
),
),
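# For reference, softplus with the kwargs used above computes
# log(1 + exp(beta * x)) / beta and reverts to the identity where
# beta * x > threshold for numerical stability; a rough NumPy equivalent:
#
#   def softplus_ref(x, beta=3, threshold=.2):
#       return np.where(beta * x > threshold, x, np.log1p(np.exp(beta * x)) / beta)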
OpInfo(
"linalg.tensorinv",
ref=np.linalg.tensorinv,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_tensorinv,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
),
OpInfo(
"linalg.tensorsolve",
ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims),
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_tensorsolve,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack,
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}),
'TestCommon', 'test_noncontiguous_samples',
device_type='cuda')],
),
OpInfo(
"nn.functional.mse_loss",
aten_backward_name='mse_loss_backward',
ref=loss_reference_reduction_wrapper(lambda input, target: (input - target) ** 2),
sample_inputs_func=sample_inputs_loss,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.float16),
backward_dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
),
OpInfo(
"nn.functional.grid_sample",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_out=False,
sample_inputs_func=sample_inputs_grid_sample,
supports_gradgrad=False,
gradcheck_nondet_tol=1e-15),
OpInfo(
"argwhere",
ref=np.argwhere,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_argwhere,
),
ReductionOpInfo(
'all',
identity=True,
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.all),
skips=(
# FIXME: does not support passing keepdim without dim
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'any',
identity=False,
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.any),
skips=(
# FIXME: does not support passing keepdim without dim
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'amax',
nan_policy='propagate',
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=reference_reduction_numpy(np.amax),
skips=(
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
error_inputs_func=error_inputs_aminmax_amax_amin,
),
ReductionOpInfo(
'amin',
nan_policy='propagate',
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=reference_reduction_numpy(np.amin),
skips=(
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
error_inputs_func=error_inputs_aminmax_amax_amin,
),
ReductionOpInfo(
'argmax',
supports_multiple_dims=False,
supports_autograd=False,
assert_jit_shape_analysis=True,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmax, supports_keepdims=False),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'argmin',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmin, supports_keepdims=False),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'count_nonzero',
identity=0,
supports_out=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_reduction_count_nonzero,
ref=reference_reduction_numpy(np.count_nonzero),
skips=(
# FIXME: count_nonzero does not accept keepdim kwarg
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
),
),
ReductionOpInfo(
'mean',
nan_policy='propagate',
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# FIXME: mean needs 'dim' parameter when using the 'out' overload.
# Adding it with 'generate_args_kwargs' does not work, since these also get passed
# onto the reference implementations.
supports_out=False,
assert_autodiffed=True,
assert_jit_shape_analysis=True,
promotes_int_to_float=True,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.mean),
error_inputs_func=error_inputs_mean,
skips=(
# FIXME: mean does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: mean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
device_type='cuda', dtypes=[torch.complex64]),
),
),
ReductionOpInfo(
'nanmean',
nan_policy='omit',
assert_autodiffed=True,
promotes_int_to_float=True,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True),
ref=reference_reduction_numpy(np.nanmean),
skips=(
# AssertionError: False is not true :
# Failure in testing nodes' autodifferentiation.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# FIXME: nanmean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
device_type='cuda', dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
device_type='cuda', dtypes=[torch.complex64]),
),
),
ReductionOpInfo(
'std',
nan_policy='propagate',
supports_out=False,
complex_to_real=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_autodiffed=True,
promotes_int_to_float=True,
check_batched_forward_grad=False,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
ref=reference_std_var(np.std),
generate_args_kwargs=generate_std_var_kwargs,
skips=(
# FIXME: cannot specify keepdim without dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'),
# NumPy is giving NaN for this
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'),
),
),
ReductionOpInfo(
'var',
nan_policy='propagate',
supports_out=False,
assert_autodiffed=True,
promotes_int_to_float=True,
complex_to_real=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
ref=reference_std_var(np.var),
generate_args_kwargs=generate_std_var_kwargs,
skips=(
# FIXME: cannot specify keepdim without dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'),
# NumPy is giving NaN for this
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'),
),
),
ReductionOpInfo(
'prod',
identity=1,
nan_policy='propagate',
supports_multiple_dims=False,
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_int64=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_prod,
ref=reference_reduction_numpy(np.prod),
skips=(
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
# FIXME: prod does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: prod reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: prod does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16, torch.complex64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.uint8, torch.float16, torch.complex64]),
),
),
ReductionOpInfo(
'sum',
identity=0,
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_int64=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
ref=reference_reduction_numpy(np.sum),
skips=(
# FIXME: sum does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.float16]),
),
),
ReductionOpInfo(
'nansum',
identity=0,
nan_policy='omit',
supports_out=True,
promotes_int_to_int64=True,
supports_forward_ad=True,
check_batched_forward_grad=False,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True),
ref=reference_reduction_numpy(np.nansum),
skips=(
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# FIXME: nansum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: flaky test so skipped instead of xfailed
# possibly bad low precision reference in numpy
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
),
),
ReductionOpInfo(
'_masked.sum',
ref=reference_reduction_numpy(np.sum),
method_variant=None,
identity=0,
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
promotes_int_to_int64=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
DecorateInfo(unittest.skip("Failing on some jobs"), 'TestReductions', 'test_reference_masked',
dtypes=(torch.bool, torch.int8, torch.int16, torch.int32)),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}),
'TestReductions', 'test_ref_small_input'),
],
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction
),
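# Illustrative sketch of the masked-reduction semantics exercised above
# (torch._masked is a private namespace and the exact signature may differ):
# elements where the mask is False are ignored, i.e. treated as the identity.
#
#   x = torch.tensor([[1., 2.], [3., 4.]])
#   mask = torch.tensor([[True, False], [True, True]])
#   torch._masked.sum(x, 1, mask=mask)  # roughly tensor([1., 7.])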
ReductionOpInfo(
'_masked.prod',
ref=reference_reduction_numpy(np.prod),
method_variant=None,
identity=1,
nan_policy='propagate',
# https://github.com/pytorch/pytorch/issues/80411
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse=True,
supports_sparse_csr=True,
promotes_int_to_int64=True,
# FIXME: "prod_cpu" not implemented for 'BFloat16'
# FIXME: "prod_cpu" not implemented for 'Half'
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Failing on some jobs"), 'TestReductions', 'test_reference_masked',
dtypes=(torch.bool, torch.int8, torch.int16, torch.int32),),
# FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs)
DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout', device_type='cuda',
dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32,
torch.int64, torch.complex64, torch.complex128)),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_ref_duplicate_values'),
],
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction,
),
OpInfo(
'_masked.cumsum',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
method_variant=None,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
# Can reuse the same inputs; dim is required in both
sample_inputs_func=sample_inputs_masked_cumops,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
OpInfo(
'_masked.cumprod',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
method_variant=None,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
# Can reuse the same inputs; dim is required in both
sample_inputs_func=sample_inputs_masked_cumops,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
),
ReductionOpInfo(
'_masked.amax',
nan_policy='propagate',
supports_out=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
supports_sparse=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_sparse_csr=True,
ref=reference_reduction_numpy(np.amax),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# FIXME: amax reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: Unknown builtin op: aten::iinfo
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs)
# FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs)
DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout',
dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32,
torch.int64, torch.complex64, torch.complex128)),
),
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.amin',
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=all_types_and(torch.float16, torch.bfloat16),
supports_sparse=True,
supports_sparse_csr=True,
ref=reference_reduction_numpy(np.amin),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# FIXME: amin reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: Unknown builtin op: aten::iinfo
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs)
# FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs)
DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout',
dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32,
torch.int64, torch.complex64, torch.complex128)),
),
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.argmax',
supports_out=False,
supports_multiple_dims=False,
supports_autograd=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmax, supports_keepdims=False),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# initial is not a keyword for argmax
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_reference_masked'),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.argmin',
supports_out=False,
supports_multiple_dims=False,
supports_autograd=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmin, supports_keepdims=False),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# initial is not a keyword for argmin
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_reference_masked'),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.mean',
ref=reference_reduction_numpy(np.mean) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,
method_variant=None,
nan_policy='propagate',
supports_out=False,
supports_sparse_csr=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_ref_duplicate_values',
dtypes=(torch.bool,)),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_reference_masked',
dtypes=(torch.bool,)),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_ref_small_input',
dtypes=(torch.bool,)),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# FIXME: mean reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs)
DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout',
dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32,
torch.int64, torch.complex64, torch.complex128)),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
],
sample_inputs_func=sample_inputs_masked_reduction,
sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
OpInfo(
'_masked.median',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16),
method_variant=None,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_masked_softmax,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.norm',
identity=0,
method_variant=None,
nan_policy='propagate',
supports_out=False,
promotes_int_to_float=True,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# FIXME: norm reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# torch.jit.frontend.NotSupportedError: Compiled functions
# can't take variable number of arguments or use
# keyword-only arguments with defaults
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_masked_norm,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.var',
ref=reference_reduction_numpy(np.var) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,
method_variant=None,
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# FIXME: var reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02),
torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestReductions', 'test_ref_small_input'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestMasked', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
],
sample_inputs_func=sample_inputs_masked_std_var,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
check_batched_grad=True,
),
ReductionOpInfo(
'_masked.std',
ref=reference_reduction_numpy(np.std) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,
method_variant=None,
nan_policy='propagate',
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
skips=(
# Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479
DecorateInfo(
unittest.skip("Skipped!"),
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
dtypes=(torch.complex64, torch.complex128)),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# FIXME: std reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness',
dtypes=(torch.float16,)),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestReductions', 'test_ref_small_input'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestMasked', 'test_reference_masked'),
],
sample_inputs_func=sample_inputs_masked_std_var,
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
check_batched_grad=True,
),
OpInfo(
'_masked.softmax',
method_variant=None,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_forward_ad=True,
supports_out=False),
OpInfo(
'_masked.log_softmax',
method_variant=None,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}),
'TestMasked', 'test_reference_masked'),
],
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_forward_ad=True,
supports_out=False),
OpInfo(
'_masked.softmin',
method_variant=None,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_forward_ad=True,
supports_out=False),
OpInfo(
'_masked.normalize',
method_variant=None,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_normalize,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: "clamp_min_cpu" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure, 'TestMasked', 'test_reference_masked',
device_type='cpu', dtypes=[torch.half]),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False),
OpInfo(
'_masked.logaddexp',
dtypes=floating_types_and(torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
),
sample_inputs_func=sample_inputs_masked_logaddexp,
gradcheck_wrapper=gradcheck_wrapper_masked_pointwise_operation
),
ReductionOpInfo(
'_masked.logsumexp',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
method_variant=None,
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# FIXME: reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# Identity can't be -torch.inf without overflow
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_empty_tensor_empty_slice'),
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# all the values are the same except for -inf vs nan
DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
OpInfo(
"nn.functional.ctc_loss",
dtypes=floating_types(),
supports_out=False,
sample_inputs_func=sample_inputs_ctc_loss,
skips=(
# https://github.com/pytorch/pytorch/issues/67462
# torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_grad",
dtypes=(torch.float64,),
),
# RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_gradgrad",
dtypes=(torch.float64,),
),
# RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
# Operation calls data_ptr() somewhere; needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
),
),
OpInfo(
"nn.functional.cosine_embedding_loss",
dtypes=all_types_and(torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_cosine_embedding_loss,
),
OpInfo(
"nn.functional.nll_loss",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_nll_loss,
supports_forward_ad=True,
assert_jit_shape_analysis=True,
skips=(
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0, i1):
# return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32))
# ~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
),
OpInfo(
"nn.functional.gaussian_nll_loss",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
# Runs very slowly on slow gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_gaussian_nll_loss,
skips=(
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
decorators=(
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02),
torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}),
'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
)
),
OpInfo(
"nn.functional.hinge_embedding_loss",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_hinge_embedding_loss,
error_inputs_func=error_inputs_hinge_embedding_loss,
reference_inputs_func=reference_inputs_hinge_embedding_loss,
),
OpInfo(
"nn.functional.huber_loss",
aten_backward_name='huber_loss_backward',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_huber_loss,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
)
),
OpInfo(
"nn.functional.pdist",
ref=reference_pdist,
sample_inputs_func=sample_inputs_pdist,
dtypes=floating_types(),
supports_out=False,
supports_gradgrad=False),
OpInfo(
"nn.functional.poisson_nll_loss",
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_poisson_nll_loss,
),
OpInfo(
"argsort",
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_argsort,
supports_out=False,
supports_autograd=False,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
OpInfo(
"repeat_interleave",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_repeat_interleave,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pairwise_distance",
ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: (
np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p)
),
sample_inputs_func=sample_inputs_pairwise_distance,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pixel_shuffle",
sample_inputs_func=sample_inputs_pixel_shuffle,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# https://github.com/pytorch/pytorch/issues/82235
DecorateInfo(
unittest.expectedFailure,
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
device_type='cuda',
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pixel_unshuffle",
sample_inputs_func=sample_inputs_pixel_unshuffle,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
# https://github.com/pytorch/pytorch/issues/82235
DecorateInfo(
unittest.expectedFailure,
'TestSchemaCheckModeOpInfo',
'test_schema_correctness',
device_type='cuda',
),
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.kl_div",
sample_inputs_func=sample_inputs_kl_div,
dtypes=floating_types_and(torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64),
dtypesIfCUDA=floating_types_and(
torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64
),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
),
OpInfo(
"diagflat",
ref=lambda input, offset=0: np.diagflat(input, k=offset),
sample_inputs_func=sample_inputs_diagflat,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
# See https://github.com/pytorch/pytorch/pull/78358
check_batched_forward_grad=False,
),
OpInfo(
'scatter_reduce',
variant_test_name='sum',
# complex not added to dtypes as complex gradients are not properly handled
# and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_scatter_reduce,
),
OpInfo(
'scatter_reduce',
variant_test_name='prod',
# complex not added to dtypes as complex gradients are not properly handled
# and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_reduce,
skips=(
# Pre-existing condition (calls .item); needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
),
),
OpInfo(
'scatter_reduce',
variant_test_name='mean',
# complex not added to dtypes as complex gradients are not properly handled
# and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_reduce,
),
OpInfo(
'scatter_reduce',
variant_test_name='amin',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_reduce,
),
OpInfo(
'scatter_reduce',
variant_test_name='amax',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_reduce,
),
OpInfo(
'segment_reduce',
variant_test_name='lengths',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
# RuntimeError: derivative for aten::_segment_reduce_backward is not implemented
supports_gradgrad=False,
sample_inputs_func=sample_inputs_segment_reduce,
skips=(
# FIXME: CUDA driver API confirmed a leak in
# __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
),
),
OpInfo(
'segment_reduce',
variant_test_name='offsets',
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
# RuntimeError: derivative for aten::_segment_reduce_backward is not implemented
supports_gradgrad=False,
sample_inputs_func=partial(sample_inputs_segment_reduce, mode='offsets'),
skips=(
# FIXME: CUDA driver API confirmed a leak in
# __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
),
),
UnaryUfuncInfo(
'special.airy_ai',
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
        ref=(lambda x: scipy.special.airy(x)[0]) if TEST_SCIPY else None,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
'TestUnaryUfuncs',
'test_reference_numerics_large',
),
),
supports_autograd=False,
),
UnaryUfuncInfo(
'special.bessel_j0',
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.j0 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.bessel_j1',
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.j1 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.bessel_y0',
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.y0 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.bessel_y1',
decorators=(
precisionOverride(
{
torch.float32: 1e-04,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.y1 if TEST_SCIPY else None,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.chebyshev_polynomial_t',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.chebyshev_polynomial_u',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.chebyshev_polynomial_v',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipping - testing takes an unreasonably long time, #79528")),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.chebyshev_polynomial_w',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipping - testing takes an unreasonably long time, #79528")),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.hermite_polynomial_h',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.hermite_polynomial_he',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.laguerre_polynomial_l',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.legendre_polynomial_p',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipping - testing takes an unreasonably long time, #79528")),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.modified_bessel_i0',
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.i0 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.modified_bessel_i1',
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.i1 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.modified_bessel_k0',
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.k0 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.modified_bessel_k1',
decorators=(
precisionOverride(
{
torch.float32: 1e-03,
torch.float64: 1e-05,
},
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.k1 if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.scaled_modified_bessel_k0',
decorators=(
toleranceOverride(
{
torch.float32: tol(atol=1e-03, rtol=1e-03),
torch.float64: tol(atol=1e-05, rtol=1e-03),
}
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.k0e if TEST_SCIPY else None,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.scaled_modified_bessel_k1',
decorators=(
toleranceOverride(
{
torch.float32: tol(atol=1e-03, rtol=1e-03),
torch.float64: tol(atol=1e-05, rtol=1e-03),
}
),
),
dtypes=all_types_and(torch.bool),
ref=scipy.special.k1e if TEST_SCIPY else None,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.shifted_chebyshev_polynomial_t',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipping - testing takes an unreasonably long time, #79528")),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.shifted_chebyshev_polynomial_u',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipping - testing takes an unreasonably long time, #79528")),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.shifted_chebyshev_polynomial_v',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipping - testing takes an unreasonably long time, #79528")),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
BinaryUfuncInfo(
'special.shifted_chebyshev_polynomial_w',
dtypes=all_types_and(torch.bool),
promotes_int_to_float=True,
skips=(
DecorateInfo(unittest.skip("Skipping - testing takes an unreasonably long time, #79528")),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'),
),
supports_one_python_scalar=True,
supports_autograd=False,
),
UnaryUfuncInfo(
'special.spherical_bessel_j0',
decorators=(
toleranceOverride(
{
torch.float32: tol(atol=1e-03, rtol=1e-03),
torch.float64: tol(atol=1e-05, rtol=1e-03),
}
),
),
dtypes=all_types_and(torch.bool),
        ref=(lambda x: scipy.special.spherical_jn(0, x)) if TEST_SCIPY else None,
supports_autograd=False,
),
]
class ReductionPythonRefInfo(ReductionOpInfo):
'''
    An OpInfo for a Python reference of a reduction operation.
'''
def __init__(
self,
            name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name='', # the variant name for corresponding torch opinfo
supports_nvfuser=True,
**kwargs): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name, torch_opinfo_variant_name)
self.supports_nvfuser = supports_nvfuser
assert isinstance(self.torch_opinfo, ReductionOpInfo)
inherited = self.torch_opinfo._original_reduction_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
# See https://github.com/pytorch/pytorch/issues/77216
self.validate_view_consistency = False
super().__init__(**ukwargs)
class ElementwiseUnaryPythonRefInfo(UnaryUfuncInfo):
'''
An OpInfo for a Python reference of an elementwise unary operation.
'''
def __init__(
self,
            name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name='', # the variant name for corresponding torch opinfo
supports_nvfuser=True,
**kwargs): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name, torch_opinfo_variant_name)
self.supports_nvfuser = supports_nvfuser
assert isinstance(self.torch_opinfo, UnaryUfuncInfo)
inherited = self.torch_opinfo._original_unary_ufunc_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
        super().__init__(**ukwargs)
class ElementwiseBinaryPythonRefInfo(BinaryUfuncInfo):
'''
An OpInfo for a Python reference of an elementwise binary operation.
'''
def __init__(
self,
            name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name='', # the variant name for corresponding torch opinfo
supports_nvfuser=True,
**kwargs): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name, torch_opinfo_variant_name)
self.supports_nvfuser = supports_nvfuser
assert isinstance(self.torch_opinfo, BinaryUfuncInfo)
inherited = self.torch_opinfo._original_binary_ufunc_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
        super().__init__(**ukwargs)
class SpectralFuncPythonRefInfo(SpectralFuncInfo):
'''
    An OpInfo for a Python reference of a spectral function.
'''
def __init__(
self,
            name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant='',
supports_nvfuser=True,
**kwargs): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name, torch_opinfo_variant)
self.supports_nvfuser = supports_nvfuser
assert isinstance(self.torch_opinfo, SpectralFuncInfo)
inherited = self.torch_opinfo._original_spectral_func_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super().__init__(**ukwargs)
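# Each *PythonRefInfo wrapper above looks up the referenced torch OpInfo and reuses its
# recorded constructor arguments; kwargs passed explicitly override the inherited ones.
# Illustrative sketch only (real entries of this form appear in python_ref_db below):
#
#     ElementwiseUnaryPythonRefInfo(
#         "_refs.abs",               # the Python reference under test
#         torch_opinfo_name="abs",   # inherit dtypes, sample inputs, etc. from the "abs" OpInfo
#         supports_nvfuser=False,    # example of overriding a single field
#     )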
# Separate registry for experimental Python Reference OpInfos.
python_ref_db = [
#
# Elementwise Unary OpInfos
#
ElementwiseUnaryPythonRefInfo(
"_refs.abs",
torch_opinfo_name="abs",
skips=(
# Reference result was farther (0.0) from the precise computation
# than the torch result was (nan)!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.chalf,), device_type='cpu', active_if=not (IS_MACOS or IS_WINDOWS)),
# Reference result was farther (0.0) from the precise computation
# than the torch result was (nan)!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.chalf,), device_type='cpu', active_if=not (IS_MACOS or IS_WINDOWS)),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs.acos",
torch_opinfo_name="acos",
),
ElementwiseUnaryPythonRefInfo(
"_refs.acosh",
torch_opinfo_name="acosh",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.asin",
torch_opinfo_name="asin",
),
ElementwiseUnaryPythonRefInfo(
"_refs.asinh",
torch_opinfo_name="asinh",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.arange",
torch_opinfo_name="arange",
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# See https://github.com/pytorch/pytorch/issues/82364
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# Prims arange does not follow aten
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',
dtypes=(torch.int64,)),
),
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.linspace",
torch_opinfo_name="linspace",
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# cpu implementation is wrong on some integral types
# https://github.com/pytorch/pytorch/issues/81996
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cpu"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.int16, torch.int32, torch.int64), device_type="cpu"),
# cuda implementation is off-by-one on some inputs due to precision issues
# https://github.com/pytorch/pytorch/issues/82230
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64),
device_type="cuda"),
),
# returns a view of an intermediate tensor (prims.to_dtype)
validate_view_consistency=False,
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.logspace",
torch_opinfo_name="logspace",
skips=(
# Tests that assume input is a tensor or sequence of tensors
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
# Off-by-one issue when casting floats to ints
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.int16, torch.int32, torch.int64),
device_type="cuda"),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.int16, torch.int32, torch.int64),
device_type="cuda"),
),
# returns a view of an intermediate tensor (prims.to_dtype)
validate_view_consistency=False,
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.meshgrid",
torch_opinfo_name="meshgrid",
torch_opinfo_variant_name="variadic_tensors",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.meshgrid",
torch_opinfo_name="meshgrid",
torch_opinfo_variant_name="list_of_tensors",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.atan",
torch_opinfo_name="atan",
),
ElementwiseUnaryPythonRefInfo(
"_refs.atanh",
torch_opinfo_name="atanh",
),
ElementwiseUnaryPythonRefInfo(
"_refs.bitwise_not",
torch_opinfo_name="bitwise_not",
),
ElementwiseUnaryPythonRefInfo(
"_refs.ceil",
torch_opinfo_name="ceil",
),
ElementwiseUnaryPythonRefInfo(
"_refs.conj_physical",
torch_opinfo_name="conj_physical",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.cos",
torch_opinfo_name="cos",
),
ElementwiseUnaryPythonRefInfo(
"_refs.cosh",
torch_opinfo_name="cosh",
),
ElementwiseUnaryPythonRefInfo(
"_refs.digamma",
torch_opinfo_name="digamma",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.erf",
torch_opinfo_name="erf",
),
ElementwiseUnaryPythonRefInfo(
"_refs.erfinv",
torch_opinfo_name="erfinv",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.erfc",
torch_opinfo_name="erfc",
),
ElementwiseUnaryPythonRefInfo(
"_refs.exp",
torch_opinfo_name="exp",
),
ElementwiseUnaryPythonRefInfo(
"_refs.expm1",
torch_opinfo_name="expm1",
),
ElementwiseUnaryPythonRefInfo(
"_refs.exp2",
torch_opinfo_name="exp2",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.fill",
torch_opinfo_name="fill",
supports_out=True,
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.floor",
torch_opinfo_name="floor",
),
ElementwiseUnaryPythonRefInfo(
"_refs.frac",
torch_opinfo_name="frac",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.imag",
torch_opinfo_name="imag",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isfinite",
torch_opinfo_name="isfinite",
supports_out=True,
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isinf",
torch_opinfo_name="isinf",
supports_out=True,
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isposinf",
torch_opinfo_name="isposinf",
supports_out=True,
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isneginf",
torch_opinfo_name="isneginf",
supports_out=True,
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.isnan",
torch_opinfo_name="isnan",
supports_out=True,
),
ElementwiseUnaryPythonRefInfo(
"_refs.i0",
torch_opinfo_name="i0",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.lgamma",
torch_opinfo_name="lgamma",
),
ElementwiseUnaryPythonRefInfo(
"_refs.log",
torch_opinfo_name="log",
),
ElementwiseUnaryPythonRefInfo(
"_refs.log1p",
torch_opinfo_name="log1p",
),
ElementwiseUnaryPythonRefInfo(
"_refs.log10",
torch_opinfo_name="log10",
),
ElementwiseUnaryPythonRefInfo(
"_refs.log2",
torch_opinfo_name="log2",
),
PythonRefInfo(
"_refs.logsumexp",
torch_opinfo_name="logsumexp",
        # When keepdim=False, logsumexp uses a squeeze operation
        # that is not yet exposed in nvFuser's Python API.
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.log_softmax",
torch_opinfo_name="log_softmax",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nan_to_num",
torch_opinfo_name="nan_to_num",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.neg",
torch_opinfo_name="neg",
),
ElementwiseUnaryPythonRefInfo(
"_refs.positive",
torch_opinfo_name="positive",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.real",
torch_opinfo_name="real",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.reciprocal",
torch_opinfo_name="reciprocal",
),
ElementwiseUnaryPythonRefInfo(
"_refs.round",
torch_opinfo_name="round",
),
ElementwiseUnaryPythonRefInfo(
"_refs.rsqrt",
torch_opinfo_name="rsqrt",
),
ElementwiseUnaryPythonRefInfo(
"_refs.sigmoid",
torch_opinfo_name="sigmoid",
# Reference: https://github.com/pytorch/pytorch/issues/56012
handles_complex_extremal_values=False,
handles_large_floats=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.sign",
torch_opinfo_name="sign",
),
ElementwiseUnaryPythonRefInfo(
"_refs.signbit",
torch_opinfo_name="signbit",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.sin",
torch_opinfo_name="sin",
),
ElementwiseUnaryPythonRefInfo(
"_refs.sinh",
torch_opinfo_name="sinh",
),
PythonRefInfo(
"_refs.softmax",
torch_opinfo_name="softmax",
),
ElementwiseUnaryPythonRefInfo(
"_refs.sqrt",
torch_opinfo_name="sqrt",
),
ElementwiseUnaryPythonRefInfo(
"_refs.square",
torch_opinfo_name="square",
skips=(
# AssertionError: Reference result was farther (2.2417024338305655e-07) from the precise computation
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex64,)),
),
),
ElementwiseUnaryPythonRefInfo(
"_refs.tan",
torch_opinfo_name="tan",
),
ElementwiseUnaryPythonRefInfo(
"_refs.tanh",
torch_opinfo_name="tanh",
),
ElementwiseUnaryPythonRefInfo(
"_refs.trunc",
torch_opinfo_name="trunc",
),
#
# Elementwise Unary Special OpInfos
#
ElementwiseUnaryPythonRefInfo(
"_refs.special.i0e",
torch_opinfo_name="special.i0e",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.i1",
torch_opinfo_name="special.i1",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.i1e",
torch_opinfo_name="special.i1e",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.special.logit",
torch_opinfo_name="logit",
supports_nvfuser=False,
),
#
# Elementwise Unary nn.functional OpInfos
#
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.celu",
torch_opinfo_name="nn.functional.celu",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.threshold",
torch_opinfo_name="nn.functional.threshold",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.nn.functional.dropout",
torch_opinfo_name="nn.functional.dropout",
decorators=(
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: dropout is not comparable"),
'TestMathBits',
'test_neg_view'),
# dropout is not comparable
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'),
)
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.elu",
torch_opinfo_name="nn.functional.elu",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.hardtanh",
torch_opinfo_name="nn.functional.hardtanh",
supports_nvfuser=False,
),
PythonRefInfo( # TODO: Port this to an UnaryOpInfo
"_refs.nn.functional.gelu",
torch_opinfo_name="nn.functional.gelu",
),
PythonRefInfo(
"_refs.nn.functional.layer_norm",
torch_opinfo_name="nn.functional.layer_norm",
skips=(
# Reference result was farther (3.5762786809723224e-07) from the precise computation
# than the torch result was (2.5068410824946596e-07)!
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.float32,), device_type='cpu'),
),
),
PythonRefInfo(
"_refs.nn.functional.leaky_relu",
torch_opinfo_name="nn.functional.leaky_relu",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.prelu",
torch_opinfo_name="nn.functional.prelu",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.relu",
torch_opinfo_name="nn.functional.relu",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.relu6",
torch_opinfo_name="nn.functional.relu6",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.mish",
torch_opinfo_name="nn.functional.mish",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.selu",
torch_opinfo_name="nn.functional.selu",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.softplus",
torch_opinfo_name="nn.functional.softplus",
),
PythonRefInfo(
"_refs.nn.functional.l1_loss",
torch_opinfo_name="nn.functional.l1_loss",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.nn.functional.margin_ranking_loss",
torch_opinfo_name="nn.functional.margin_ranking_loss",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.nn.functional.mse_loss",
torch_opinfo_name="nn.functional.mse_loss",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.nn.functional.hinge_embedding_loss",
torch_opinfo_name="nn.functional.hinge_embedding_loss",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.tanhshrink",
torch_opinfo_name="nn.functional.tanhshrink",
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.hardshrink",
torch_opinfo_name="nn.functional.hardshrink",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.nn.functional.softshrink",
torch_opinfo_name="nn.functional.softshrink",
supports_nvfuser=False,
),
#
# Elementwise Binary Reference OpInfos
#
ElementwiseBinaryPythonRefInfo(
"_refs.add",
torch_opinfo_name="add",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=False,
supports_one_python_scalar=True,
),
ElementwiseBinaryPythonRefInfo(
"_refs.atan2",
torch_opinfo_name="atan2",
),
ElementwiseBinaryPythonRefInfo(
"_refs.bitwise_and",
torch_opinfo_name="bitwise_and",
),
ElementwiseBinaryPythonRefInfo(
"_refs.bitwise_left_shift",
torch_opinfo_name="bitwise_left_shift",
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.bitwise_or",
torch_opinfo_name="bitwise_or",
),
ElementwiseBinaryPythonRefInfo(
"_refs.bitwise_xor",
torch_opinfo_name="bitwise_xor",
),
ElementwiseBinaryPythonRefInfo(
"_refs.copysign",
torch_opinfo_name="copysign",
supports_nvfuser=False,
skips=(
# RuntimeError: Expected divisor (b) to be on the same device (cuda:0) as dividend (a), but it is found on cpu!
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
)
),
ElementwiseBinaryPythonRefInfo(
"_refs.div",
torch_opinfo_name="div",
torch_opinfo_variant_name="no_rounding_mode",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=False,
supports_one_python_scalar=True,
supports_nvfuser=False,
skips=(
# NotImplementedError: argument of type: <class 'complex'>
DecorateInfo(
unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.complex32, torch.complex64, torch.complex128,)
),
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.complex32,), device_type="cuda"
),
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.complex32,), device_type="cuda"
),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.div",
torch_opinfo_name="div",
torch_opinfo_variant_name="trunc_rounding",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=False,
supports_one_python_scalar=True,
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.div",
torch_opinfo_name="div",
torch_opinfo_variant_name="floor_rounding",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=False,
supports_one_python_scalar=True,
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.eq",
torch_opinfo_name="eq",
),
ElementwiseBinaryPythonRefInfo(
"_refs.float_power",
torch_opinfo_name="float_power",
supports_nvfuser=False,
skips=(
# Test doesn't account for float -> double type promotion
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
)
),
ElementwiseBinaryPythonRefInfo(
"_refs.floor_divide",
torch_opinfo_name="floor_divide",
rhs_make_tensor_kwargs=dict(exclude_zero=True),
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=False,
supports_one_python_scalar=True,
supports_nvfuser=False,
# bfloat16 floor_divide compared with a float32 reference works inconsistently
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.bfloat16,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.bfloat16,)),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.fmax",
torch_opinfo_name="fmax",
supports_rhs_python_scalar=False,
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.fmin",
torch_opinfo_name="fmin",
supports_rhs_python_scalar=False,
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.fmod",
torch_opinfo_name="fmod",
rhs_make_tensor_kwargs={'exclude_zero': True},
supports_rhs_python_scalar=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.bfloat16,), device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.bfloat16,), device_type='cpu'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.gcd",
torch_opinfo_name="gcd",
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.ge",
torch_opinfo_name="ge",
),
ElementwiseBinaryPythonRefInfo(
"_refs.gt",
torch_opinfo_name="gt",
),
ElementwiseBinaryPythonRefInfo(
"_refs.heaviside",
torch_opinfo_name="heaviside",
supports_rhs_python_scalar=False,
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.hypot",
torch_opinfo_name="hypot",
supports_rhs_python_scalar=False,
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.igamma",
torch_opinfo_name="igamma",
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.igammac",
torch_opinfo_name="igammac",
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.isclose",
torch_opinfo_name="isclose",
supports_nvfuser=False,
skips=(
# Intentional xfail -- isclose does not type promote
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.lcm",
torch_opinfo_name="lcm",
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.le",
torch_opinfo_name="le",
),
ElementwiseBinaryPythonRefInfo(
"_refs.logical_and",
torch_opinfo_name="logical_and",
),
ElementwiseUnaryPythonRefInfo(
"_refs.logical_not",
torch_opinfo_name="logical_not",
),
ElementwiseBinaryPythonRefInfo(
"_refs.logical_or",
torch_opinfo_name="logical_or",
),
ElementwiseBinaryPythonRefInfo(
"_refs.logical_xor",
torch_opinfo_name="logical_xor",
),
ElementwiseBinaryPythonRefInfo(
"_refs.lt",
torch_opinfo_name="lt",
),
ElementwiseBinaryPythonRefInfo(
"_refs.maximum",
torch_opinfo_name="maximum",
supports_nvfuser=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.minimum",
torch_opinfo_name="minimum",
supports_nvfuser=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.mul",
torch_opinfo_name="mul",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=False,
supports_one_python_scalar=True,
skips=(
# Reference result was farther (0.0) from the precise computation
# than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.complex32,),
),
# Reference result was farther (0.0) from the precise computation
# than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.complex32,), device_type='cuda'
),
# Reference result was farther (0.0) from the precise computation
# than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.complex32,), device_type='cuda'
),
)
),
ElementwiseBinaryPythonRefInfo(
"_refs.ne",
torch_opinfo_name="ne",
),
ElementwiseBinaryPythonRefInfo(
"_refs.nextafter",
torch_opinfo_name="nextafter",
supports_nvfuser=False,
),
ElementwiseBinaryPythonRefInfo(
"_refs.pow",
torch_opinfo_name="pow",
supports_nvfuser=False, # clone default
skips=(
# Reference result was farther (inf) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.complex32,),
),
# Reference result was farther (inf) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.complex32,), device_type="cuda"
),
# Reference result was farther (inf) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.complex32,), device_type="cuda"
),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.remainder",
torch_opinfo_name="remainder",
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref',
dtypes=(torch.bfloat16,), device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.bfloat16,), device_type='cpu'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.rsub",
torch_opinfo_name="rsub",
# https://github.com/pytorch/pytorch/issues/76944
skips=(
# Reference result was farther (nan) from the precise computation than
# the torch result was (nan)!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.chalf,), device_type='cpu'),
# Reference result was farther (nan) from the precise computation than
# the torch result was (nan)!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.chalf,), device_type='cpu'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.sub",
torch_opinfo_name="sub",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=False,
supports_one_python_scalar=True,
skips=(
# Reference result was farther (nan) from the precise computation than
# the torch result was (nan)!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.chalf,), device_type='cpu'),
# Reference result was farther (nan) from the precise computation than
# the torch result was (nan)!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.chalf,), device_type='cpu'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.true_divide",
torch_opinfo_name="true_divide",
# https://github.com/pytorch/pytorch/issues/76944
supports_two_python_scalars=False,
supports_one_python_scalar=True,
skips=(
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor',
dtypes=(torch.complex32,),
),
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref',
dtypes=(torch.complex32,), device_type="cuda"
),
# Reference result was farther (0.7433461727239705) from the precise
# computation than the torch result was (nan)!
DecorateInfo(
unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback',
dtypes=(torch.complex32,), device_type="cuda"
),
),
),
#
# Elementwise Binary Special OpInfos
#
ElementwiseBinaryPythonRefInfo(
"_refs.special.zeta",
torch_opinfo_name="special.zeta",
supports_one_python_scalar=True,
supports_nvfuser=False,
),
#
# Elementwise Ternary Reference OpInfos
#
ElementwiseBinaryPythonRefInfo(
"_refs.clamp_min",
torch_opinfo_name="clamp_min",
supports_nvfuser=False,
skips=(
            # the error test is disabled since an rhs non-tensor Python scalar is supported
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
ElementwiseBinaryPythonRefInfo(
"_refs.clamp_max",
torch_opinfo_name="clamp_max",
supports_nvfuser=False,
skips=(
            # the error test is disabled since an rhs non-tensor Python scalar is supported
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.clamp",
torch_opinfo_name="clamp",
supports_nvfuser=False,
),
#
# Data Conversion & Data Movement Opinfos
#
PythonRefInfo(
"_refs.clone",
torch_opinfo_name="clone",
supports_nvfuser=False,
),
#
# View & Shape OpInfos
#
PythonRefInfo(
"_refs.atleast_1d",
torch_opinfo_name="atleast_1d",
validate_view_consistency=False,
supports_nvfuser=False
),
PythonRefInfo(
"_refs.atleast_2d",
torch_opinfo_name="atleast_2d",
validate_view_consistency=False,
supports_nvfuser=False
),
PythonRefInfo(
"_refs.atleast_3d",
torch_opinfo_name="atleast_3d",
validate_view_consistency=False,
supports_nvfuser=False
),
PythonRefInfo(
"_refs.as_strided",
torch_opinfo_name="as_strided",
# FIXME: doesn't support chalf
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_nvfuser=False,
skips=(
# TODO: fix and/or update to xfails
DecorateInfo(unittest.skip("Errors when storage_offset is included"),
'TestCommon', 'test_python_ref_meta'),
# cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'),
),
),
PythonRefInfo(
"_refs.broadcast_shapes",
torch_opinfo_name="broadcast_shapes",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.broadcast_tensors",
torch_opinfo_name="broadcast_tensors",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.broadcast_to",
torch_opinfo_name="broadcast_to",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.cat",
torch_opinfo_name="cat",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.chunk",
torch_opinfo_name="chunk",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.column_stack",
torch_opinfo_name="column_stack",
supports_nvfuser=False,
),
ElementwiseUnaryPythonRefInfo(
"_refs.conj",
torch_opinfo_name="conj",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.constant_pad_nd",
torch_opinfo_name="constant_pad_nd",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.contiguous",
torch_opinfo_name="contiguous",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.dsplit",
torch_opinfo_name="dsplit",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.dstack",
torch_opinfo_name="dstack",
supports_nvfuser=False,
skips=(
# https://github.com/pytorch/pytorch/issues/78613
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.expand",
torch_opinfo_name="expand",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.flatten",
torch_opinfo_name="flatten",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.flip",
torch_opinfo_name="flip",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.fliplr",
torch_opinfo_name="fliplr",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.flipud",
torch_opinfo_name="flipud",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.hstack",
torch_opinfo_name="hstack",
supports_nvfuser=False,
skips=(
# https://github.com/pytorch/pytorch/issues/78613
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.narrow",
torch_opinfo_name="narrow",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.native_layer_norm",
torch_opinfo_name="native_layer_norm",
),
PythonRefInfo(
"_refs.permute",
torch_opinfo_name="permute",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.ravel",
torch_opinfo_name="ravel",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.reshape",
torch_opinfo_name="reshape",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.roll",
torch_opinfo_name="roll",
validate_view_consistency=False,
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.rot90",
torch_opinfo_name="rot90",
validate_view_consistency=False,
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.stack",
torch_opinfo_name="stack",
supports_nvfuser=False,
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.squeeze",
torch_opinfo_name="squeeze",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.tensor_split",
torch_opinfo_name="tensor_split",
skips=(
# TensorMeta doesn't support tolist
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'),
# RuntimeError: no _refs support for torch.Tensor.tolist
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
# RuntimeError: .tolist() is not supported for tensor subclasses.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'),
)
),
PythonRefInfo(
"_refs.hsplit",
torch_opinfo_name="hsplit",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.vsplit",
torch_opinfo_name="vsplit",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.transpose",
torch_opinfo_name="transpose",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.t",
torch_opinfo_name="t",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.unsqueeze",
torch_opinfo_name="unsqueeze",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.view",
torch_opinfo_name="view",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.vstack",
torch_opinfo_name="vstack",
supports_nvfuser=False,
skips=(
# https://github.com/pytorch/pytorch/issues/78613
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),
),
),
PythonRefInfo(
"_refs.unflatten",
torch_opinfo_name="unflatten",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.unbind",
torch_opinfo_name="unbind",
supports_nvfuser=False,
),
#
# Reduction Reference OpInfos
#
ReductionPythonRefInfo(
"_refs.all",
torch_opinfo_name="all",
),
ReductionPythonRefInfo(
"_refs.amax",
torch_opinfo_name="amax",
),
ReductionPythonRefInfo(
"_refs.amin",
torch_opinfo_name="amin",
),
ReductionPythonRefInfo(
"_refs.any",
torch_opinfo_name="any",
),
ReductionPythonRefInfo(
"_refs.mean",
torch_opinfo_name="mean",
supports_out=True,
),
ReductionPythonRefInfo(
"_refs.std",
torch_opinfo_name="std",
supports_out=True,
),
    # std_mean and var_mean are not ReductionOpInfos
PythonRefInfo(
"_refs.std_mean",
torch_opinfo_name="std_mean",
validate_view_consistency=False,
),
ReductionPythonRefInfo(
"_refs.sum",
torch_opinfo_name="sum",
supports_out=True,
),
ReductionPythonRefInfo(
"_refs.prod",
torch_opinfo_name="prod",
supports_out=True,
supports_nvfuser=False,
),
ReductionPythonRefInfo(
"_refs.var",
torch_opinfo_name="var",
supports_out=True,
),
PythonRefInfo(
"_refs.var_mean",
torch_opinfo_name="var_mean",
validate_view_consistency=False,
),
#
# Linear Algebra Operators
#
PythonRefInfo(
"_refs.addr",
torch_opinfo_name="addr",
supports_nvfuser=False,
decorators=(
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',),
),
),
PythonRefInfo(
"_refs.trace",
torch_opinfo_name="trace",
decorators=(
# TODO: torch.diag is currently not supported by either refs, meta funcs, or NVFuser
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
DecorateInfo(unittest.skip("diag is not supported by meta"), 'TestCommon', 'test_python_ref_meta'),
DecorateInfo(unittest.skip("diag is not supported by nvfuser"), 'TestCommon', 'test_python_ref_executor'),
),
),
PythonRefInfo(
"_refs.norm",
torch_opinfo_name="norm",
supports_out=True,
# Uses svdvals which does not support nvfuser
supports_nvfuser=False,
# Uses vector_norm inside and vector_norm is affected by
# https://github.com/pytorch/pytorch/issues/77216
validate_view_consistency=False,
),
#
# torch.linalg
#
ReductionPythonRefInfo(
"_refs.linalg.vector_norm",
torch_opinfo_name="linalg.vector_norm",
supports_out=True,
supports_nvfuser=False, # clone_default
),
PythonRefInfo(
"_refs.linalg.matrix_norm",
torch_opinfo_name="linalg.matrix_norm",
supports_out=True,
# Uses svdvals which does not support nvfuser
supports_nvfuser=False,
# Uses vector_norm inside and vector_norm is affected by
# https://github.com/pytorch/pytorch/issues/77216
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.linalg.norm",
torch_opinfo_name="linalg.norm",
supports_out=True,
# Uses svdvals which does not support nvfuser
supports_nvfuser=False,
# Uses vector_norm inside and vector_norm is affected by
# https://github.com/pytorch/pytorch/issues/77216
validate_view_consistency=False,
),
PythonRefInfo(
"_refs.linalg.svd",
torch_opinfo_name="linalg.svd",
supports_out=True,
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.linalg.svdvals",
torch_opinfo_name="linalg.svdvals",
supports_out=True,
supports_nvfuser=False,
),
#
# Tensor Creation Reference OpInfos
#
PythonRefInfo(
"_refs.empty",
torch_opinfo_name="empty",
skips=(
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_view'),
# FIXME: shouldn't check empty results
DecorateInfo(unittest.skip("Can't check result for empty"), 'TestCommon', 'test_python_ref_executor'),
),
),
PythonRefInfo(
"_refs.empty_like",
torch_opinfo_name="empty_like",
supports_nvfuser=False,
skips=(
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_view'),
# FIXME: should not compare results of empty_like
DecorateInfo(unittest.skip("Can't check result for empty_like"), 'TestCommon', 'test_python_ref_executor'),
),
),
PythonRefInfo(
"_refs.new_empty",
torch_opinfo_name="new_empty",
supports_nvfuser=False,
skips=(
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_python_ref_torch_fallback'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestCommon',
'test_out_warning'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_conj_view'),
DecorateInfo(unittest.skip("Expected: empty is not comparable"),
'TestMathBits',
'test_neg_view'),
            # FIXME: should not compare results of new_empty
DecorateInfo(unittest.skip("Can't check result for new_empty"), 'TestCommon', 'test_python_ref_executor'),
),
),
PythonRefInfo(
"_refs.new_full",
torch_opinfo_name="new_full",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.new_ones",
torch_opinfo_name="new_ones",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.new_zeros",
torch_opinfo_name="new_zeros",
supports_nvfuser=False,
),
#
# Conditional Reference OpInfos
#
PythonRefInfo(
"_refs.masked_fill",
torch_opinfo_name="masked_fill",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.where",
torch_opinfo_name="where",
op=lambda self, condition, other: refs.where(condition, self, other),
supports_nvfuser=False,
),
#
# Test-related functions
#
PythonRefInfo(
"_refs.allclose",
torch_opinfo_name="allclose",
supports_nvfuser=False,
),
#
# FFT OpInfos
#
SpectralFuncPythonRefInfo(
"_refs.fft.fft",
torch_opinfo_name="fft.fft",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.ifft",
torch_opinfo_name="fft.ifft",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.rfft",
torch_opinfo_name="fft.rfft",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.irfft",
torch_opinfo_name="fft.irfft",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.hfft",
torch_opinfo_name="fft.hfft",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.ihfft",
torch_opinfo_name="fft.ihfft",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.fftn",
torch_opinfo_name="fft.fftn",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.ifftn",
torch_opinfo_name="fft.ifftn",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.rfftn",
torch_opinfo_name="fft.rfftn",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.irfftn",
torch_opinfo_name="fft.irfftn",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.hfftn",
torch_opinfo_name="fft.hfftn",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.ihfftn",
torch_opinfo_name="fft.ihfftn",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.fft2",
torch_opinfo_name="fft.fft2",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.ifft2",
torch_opinfo_name="fft.ifft2",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.rfft2",
torch_opinfo_name="fft.rfft2",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.irfft2",
torch_opinfo_name="fft.irfft2",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.hfft2",
torch_opinfo_name="fft.hfft2",
supports_nvfuser=False,
),
SpectralFuncPythonRefInfo(
"_refs.fft.ihfft2",
torch_opinfo_name="fft.ihfft2",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.fft.fftshift",
torch_opinfo_name="fft.fftshift",
supports_nvfuser=False,
),
PythonRefInfo(
"_refs.fft.ifftshift",
torch_opinfo_name="fft.ifftshift",
supports_nvfuser=False,
),
]
# Common operator groupings
ops_and_refs = op_db + python_ref_db
unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)]
binary_ufuncs = [op for op in op_db if isinstance(op, BinaryUfuncInfo)]
binary_ufuncs_and_refs = tuple(op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo))
spectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)]
sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse]
sparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr]
sparse_reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo) and op.supports_sparse]
shape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)]
reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo)]
reference_filtered_ops = [op for op in reduction_ops if op.ref is not None]
reference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('_masked.')]
sparse_masked_reduction_ops = [op for op in sparse_reduction_ops if op.name.startswith('_masked.')]
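# Example (illustration only; not used in this file): test suites typically consume these
# groupings through the `ops` decorator from torch.testing._internal.common_device_type, e.g.
#
#     @ops(unary_ufuncs)
#     def test_example(self, device, dtype, op):
#         for sample in op.sample_inputs(device, dtype):
#             op(sample.input, *sample.args, **sample.kwargs)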
# TODO: review porting these to make_tensor
def index_variable(shape, max_indices, device=torch.device('cpu')):
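    """Return a random int64 index tensor of the given shape with values in [0, max_indices)."""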
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long()
return index
def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):
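    """Return a 2D int64 index tensor for gather/scatter-style tests.

    Every slice along `index_dim` holds distinct values drawn from [0, max_indices).
    If `duplicate` is True, the first two slices along the batch dimension are made equal.
    """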
assert len(shape) == 2
assert index_dim < 2
batch_dim = 1 - index_dim
index = torch.zeros(*shape, dtype=torch.long, device=device)
for i in range(shape[index_dim]):
index.select(index_dim, i).copy_(
torch.randperm(max_indices, device=device)[:shape[batch_dim]])
if duplicate:
index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))
return index
def bernoulli_scalar():
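    """Return a 0-dim bool tensor sampled from a Bernoulli(0.5) distribution."""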
return torch.tensor(0, dtype=torch.bool).bernoulli_()
def mask_not_all_zeros(shape):
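    """Return a random bool mask of the given shape that contains at least one True element."""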
assert len(shape) > 0
while True:
result = torch.randn(shape).gt(0)
if result.sum() > 0:
return result
# TODO: move all tri/tril/triu testing to tensor creation op test suite and remove
# these from here
def _compare_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device='cpu'):
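    """Compare torch.tril_indices/torch.triu_indices against a dense tril/triu + nonzero reference."""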
if row == 0 or col == 0:
        # have to handle this separately, as tril and triu do not take
        # an empty matrix as input
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device))
else:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
.tril(offset).nonzero().to(dtype).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
.triu(offset).nonzero().to(dtype).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device))
def _compare_large_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device='cpu'):
l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \
.nonzero()[-100:-1, :].transpose(0, 1).to(device)
torch.cuda.empty_cache()
r = torch.tril_indices(
row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \
.nonzero()[-100:-1, :].transpose(0, 1).to(device)
torch.cuda.empty_cache()
r = torch.triu_indices(
row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
# (
# row
# col
# offset (optional)
# dtype (optional)
# )
tri_tests_args = [
(1, 1),
(3, 3),
(3, 3, 1),
(3, 3, 2),
(3, 3, 200),
(3, 3, -1),
(3, 3, -2),
(3, 3, -200),
(0, 3, 0),
(0, 3, 1),
(0, 3, -1),
(0, 1, 2),
(1, 0, 2),
(3, 0, 0),
(3, 0, 1),
(3, 0, -1),
(0, 0, 0),
(0, 0, 1),
(0, 0, -1),
(3, 6, 0),
(3, 6, 1),
(3, 6, 3),
(3, 6, 9),
(3, 6, -1),
(3, 6, -3),
(3, 6, -9),
(6, 3, 0),
(6, 3, 1),
(6, 3, 3),
(6, 3, 9),
(6, 3, -1),
(6, 3, -3),
(6, 3, -9),
(258, 253, 1, torch.float32),
(257, 258, 1, torch.float64),
(258, 258, 1, torch.short),
(3, 513, 1, torch.long),
(513, 3, 1, torch.int),
(513, 0, 1, torch.double),
(1024, 1024),
(1024, 1024, 500, torch.float32),
(1024, 1024, 1023),
(1024, 1024, -500),
(1023, 1025),
(1025, 1023, 1022),
(1024, 1024, -500),
(3, 2028),
(3, 2028, 1),
(3, 2028, -1),
(2028, 3),
(2028, 1),
(2028, 1, -1)
]
tri_large_tests_args: List[Tuple[int, ...]] = [
# Large test cases below are deliberately commented out to speed up CI
# tests and to avoid OOM error. When modifying implementations of
# tril_indices and triu_indices, please enable these tests and make sure
# they pass.
#
# (1, 268435455),
# (5000, 5000),
# (10000, 10000),
# (268435455, 1),
# (134217727, 2, 1),
# (2, 134217727, 1),
# (536870901, 1),
# (1, 536870901),
# (268435455, 2, 1),
# (2, 268435455, 1)
]
def run_additional_tri_tests(self, device):
x = torch.ones(
3, 3, dtype=torch.long, device=device, layout=torch.strided)
l = x.tril(0).nonzero().transpose(0, 1)
u = x.triu(0).nonzero().transpose(0, 1)
self.assertEqual(l, torch.tril_indices(3, 3, device=device))
self.assertEqual(
l, torch.tril_indices(3, 3, device=device, layout=torch.strided))
self.assertEqual(u, torch.triu_indices(3, 3, device=device))
self.assertEqual(
u, torch.triu_indices(3, 3, device=device, layout=torch.strided))
self.assertRaises(
RuntimeError,
lambda: torch.triu_indices(
1, 1, device=device, layout=torch.sparse_coo))
self.assertRaises(
RuntimeError,
lambda: torch.tril_indices(
1, 1, device=device, layout=torch.sparse_coo))
| pytorch-master | torch/testing/_internal/common_methods_invocations.py |
import torch
from torch.utils._pytree import tree_map
from typing import Iterator, List
import logging
import contextlib
import itertools
from torch.utils._python_dispatch import TorchDispatchMode
# How the chain of calls works for LoggingTensor:
# 1. Call torch.sin
# 2. Attempt __torch_function__. In LoggingTensor torch function is disabled so we bypass it entirely
# 3. Enter dispatcher, wind your way through Autograd
# 4. Hit Python dispatch key, call __torch_dispatch__
# This Tensor can work with autograd in two ways:
# - The wrapped Tensor does not require gradients. In that case, the LoggingTensor
# can require gradients if the user asks for it as a constructor kwarg.
# - The wrapped Tensor can require gradients. In that case autograd will be tracked
# for the wrapped Tensor and the LoggingTensor itself cannot require gradients.
# WARNING: We allow these two possibilities for testing purposes. You should NEVER use both in a single
# test or you might get surprising behavior.
# TODO: TensorBase should work
class LoggingTensor(torch.Tensor):
elem: torch.Tensor
__slots__ = ['elem']
context = contextlib.nullcontext
__torch_function__ = torch._C._disabled_torch_function_impl
@staticmethod
def __new__(cls, elem, *args, **kwargs):
# The wrapping tensor (LoggingTensor) shouldn't hold any
# memory for the class in question, but it should still
# advertise the same device as before
r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls, elem.size(),
strides=elem.stride(), storage_offset=elem.storage_offset(),
# TODO: clone storage aliasing
dtype=elem.dtype, layout=elem.layout,
device=elem.device, requires_grad=kwargs.get("requires_grad", False)
)
# ...the real tensor is held as an element on the tensor.
r.elem = elem.detach() if r.requires_grad else elem
return r
def __repr__(self):
return super().__repr__(tensor_contents=f"{self.elem}")
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
def unwrap(e):
return e.elem if isinstance(e, cls) else e
def wrap(e):
return cls(e) if isinstance(e, torch.Tensor) else e
with cls.context():
rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)))
logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs)
return rs
class LoggingTensorMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
rs = func(*args, **kwargs)
logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs)
return rs
class LoggingTensorReentrant(LoggingTensor):
context = torch.overrides.enable_reentrant_dispatch
# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list
class LoggingTensorHandler(logging.Handler):
log_list: List[str]
next_shortid: int
def __init__(self, log_list: List[str], use_shortid_for_all_tensors: bool) -> None:
logging.Handler.__init__(self)
self.log_list = log_list
self.next_shortid = 0
self.use_shortid_for_all_tensors = use_shortid_for_all_tensors
# WARNING: not deterministic over multiple threads, this matters for
# autograd
def _shortid(self, o: object) -> int:
if not hasattr(o, '_shortid'):
o._shortid = self.next_shortid # type: ignore[attr-defined]
self.next_shortid += 1
return o._shortid # type: ignore[attr-defined]
def _fmt(self, a: object) -> str:
cond_cls = torch.Tensor if self.use_shortid_for_all_tensors else LoggingTensor
return f'${self._shortid(a)}' if isinstance(a, cond_cls) else repr(a)
def emit(self, record):
fmt_args = ", ".join(itertools.chain(
(self._fmt(a) for a in record.args[0]),
(f"{k}={self._fmt(v)}" for k, v in record.args[1].items())
))
fmt_rets = ", ".join(self._fmt(a) for a in record.args[2]) \
if isinstance(record.args[2], (list, tuple)) else self._fmt(record.args[2])
self.log_list.append(f'{fmt_rets} = {record.msg}({fmt_args})')
def log_input(name: str, var: object):
logging.getLogger("LoggingTensor").info("input", (name,), {}, (var,))
@contextlib.contextmanager
def capture_logs(is_mode=False) -> Iterator[List[str]]:
logger = logging.getLogger("LoggingTensor")
log_list: List[str] = []
handler = LoggingTensorHandler(log_list, use_shortid_for_all_tensors=is_mode)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.propagate = False
try:
yield log_list
finally:
logger.removeHandler(handler)
@contextlib.contextmanager
def capture_logs_with_logging_tensor_mode():
with LoggingTensorMode(), capture_logs(True) as logs:
yield logs
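# Illustrative usage sketch (not part of the original module): wrap a tensor in
# LoggingTensor and capture the dispatched aten calls as strings.
def _example_capture_logs_usage():
    with capture_logs() as logs:
        x = LoggingTensor(torch.ones(2))
        log_input("x", x)
        torch.sin(x)
    # logs now holds entries along the lines of
    # "$1 = torch._ops.aten.sin.default($0)" (exact op names depend on the build).
    return logs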
| pytorch-master | torch/testing/_internal/logging_tensor.py |
from collections import defaultdict
from collections.abc import Iterable
import numpy as np
import torch
import hypothesis
from functools import reduce
from hypothesis import assume
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra import numpy as stnp
from hypothesis.strategies import SearchStrategy
from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams
# Setup for the hypothesis tests.
# The tuples are (torch_quantized_dtype, zero_point_enforce), where the last
# element is the enforced zero_point. If None, any zero_point within the
# range of the data type is OK.
# Tuple with all quantized data types.
_ALL_QINT_TYPES = (
torch.quint8,
torch.qint8,
torch.qint32,
)
# Enforced zero point for every quantized data type.
# If None, any zero_point within the range of the data type is OK.
_ENFORCED_ZERO_POINT = defaultdict(lambda: None, {
torch.quint8: None,
torch.qint8: None,
torch.qint32: 0
})
def _get_valid_min_max(qparams):
scale, zero_point, quantized_type = qparams
adjustment = 1 + torch.finfo(torch.float).eps
_long_type_info = torch.iinfo(torch.long)
long_min, long_max = _long_type_info.min / adjustment, _long_type_info.max / adjustment
# make sure intermediate results are within the range of long
min_value = max((long_min - zero_point) * scale, (long_min / scale + zero_point))
max_value = min((long_max - zero_point) * scale, (long_max / scale + zero_point))
return np.float32(min_value), np.float32(max_value)
# This wrapper wraps around `st.floats` and checks the version of `hypothesis`;
# if it is too old, it removes the `width` parameter (which was introduced
# in 3.67.0).
def _floats_wrapper(*args, **kwargs):
if 'width' in kwargs and hypothesis.version.__version_info__ < (3, 67, 0):
# As long as nan, inf, min, max are not specified, reimplement the width
# parameter for older versions of hypothesis.
no_nan_and_inf = (
(('allow_nan' in kwargs and not kwargs['allow_nan']) or
'allow_nan' not in kwargs) and
(('allow_infinity' in kwargs and not kwargs['allow_infinity']) or
'allow_infinity' not in kwargs))
min_and_max_not_specified = (
len(args) == 0 and
'min_value' not in kwargs and
'max_value' not in kwargs
)
if no_nan_and_inf and min_and_max_not_specified:
if kwargs['width'] == 16:
kwargs['min_value'] = torch.finfo(torch.float16).min
kwargs['max_value'] = torch.finfo(torch.float16).max
elif kwargs['width'] == 32:
kwargs['min_value'] = torch.finfo(torch.float32).min
kwargs['max_value'] = torch.finfo(torch.float32).max
elif kwargs['width'] == 64:
kwargs['min_value'] = torch.finfo(torch.float64).min
kwargs['max_value'] = torch.finfo(torch.float64).max
kwargs.pop('width')
return st.floats(*args, **kwargs)
def floats(*args, **kwargs):
if 'width' not in kwargs:
kwargs['width'] = 32
return _floats_wrapper(*args, **kwargs)
"""Hypothesis filter to avoid overflows with quantized tensors.
Args:
tensor: Tensor of floats to filter
qparams: Quantization parameters as returned by the `qparams`.
Returns:
True
Raises:
hypothesis.UnsatisfiedAssumption
Note: This filter is slow. Use it only when filtering of the test cases is
absolutely necessary!
"""
def assume_not_overflowing(tensor, qparams):
min_value, max_value = _get_valid_min_max(qparams)
assume(tensor.min() >= min_value)
assume(tensor.max() <= max_value)
return True
"""Strategy for generating the quantization parameters.
Args:
dtypes: quantized data types to sample from.
scale_min / scale_max: Min and max scales. If None, set to 1e-3 / 1e3.
zero_point_min / zero_point_max: Min and max for the zero point. If None,
set to the minimum and maximum of the quantized data type.
Note: The min and max are only valid if the zero_point is not enforced
by the data type itself.
Generates:
scale: Sampled scale.
zero_point: Sampled zero point.
quantized_type: Sampled quantized type.
"""
@st.composite
def qparams(draw, dtypes=None, scale_min=None, scale_max=None,
zero_point_min=None, zero_point_max=None):
if dtypes is None:
dtypes = _ALL_QINT_TYPES
if not isinstance(dtypes, (list, tuple)):
dtypes = (dtypes,)
quantized_type = draw(st.sampled_from(dtypes))
_type_info = torch.iinfo(quantized_type)
qmin, qmax = _type_info.min, _type_info.max
# TODO: Maybe embed the enforced zero_point in the `torch.iinfo`.
_zp_enforced = _ENFORCED_ZERO_POINT[quantized_type]
if _zp_enforced is not None:
zero_point = _zp_enforced
else:
_zp_min = qmin if zero_point_min is None else zero_point_min
_zp_max = qmax if zero_point_max is None else zero_point_max
zero_point = draw(st.integers(min_value=_zp_min, max_value=_zp_max))
if scale_min is None:
scale_min = torch.finfo(torch.float).eps
if scale_max is None:
scale_max = torch.finfo(torch.float).max
scale = draw(floats(min_value=scale_min, max_value=scale_max, width=32))
return scale, zero_point, quantized_type
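# Illustrative usage sketch (not part of the original module): drawing
# quantization parameters inside a hypothesis test body.
def _example_qparams_usage():
    from hypothesis import given

    @given(qp=qparams(dtypes=(torch.quint8,), scale_max=100))
    def check(qp):
        scale, zero_point, quantized_type = qp
        assert quantized_type is torch.quint8
        assert 0 < scale <= 100
    check()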
"""Strategy to create different shapes.
Args:
min_dims / max_dims: minimum and maximum rank.
min_side / max_side: minimum and maximum dimensions per rank.
Generates:
Possible shapes for a tensor, constrained to the rank and dimensionality.
Example:
# Generates 3D and 4D tensors.
    @given(Q=tensor(shapes=array_shapes(min_dims=3, max_dims=4)))
    def some_test(self, Q): ...
"""
@st.composite
def array_shapes(draw, min_dims=1, max_dims=None, min_side=1, max_side=None, max_numel=None):
"""Return a strategy for array shapes (tuples of int >= 1)."""
assert(min_dims < 32)
if max_dims is None:
max_dims = min(min_dims + 2, 32)
assert(max_dims < 32)
if max_side is None:
max_side = min_side + 5
candidate = st.lists(st.integers(min_side, max_side), min_size=min_dims, max_size=max_dims)
if max_numel is not None:
candidate = candidate.filter(lambda x: reduce(int.__mul__, x, 1) <= max_numel)
return draw(candidate.map(tuple))
"""Strategy for generating test cases for tensors.
The resulting tensor is in float32 format.
Args:
shapes: Shapes under test for the tensor. Could be either a hypothesis
strategy, or an iterable of different shapes to sample from.
elements: Elements to generate from for the returned data type.
If None, the strategy resolves to float within range [-1e6, 1e6].
qparams: Instance of the qparams strategy. This is used to filter the tensor
such that the overflow would not happen.
Generates:
X: Tensor of type float32. Note that NaN and +/-inf is not included.
qparams: (If `qparams` arg is set) Quantization parameters for X.
The returned parameters are `(scale, zero_point, quantization_type)`.
(If `qparams` arg is None), returns None.
"""
@st.composite
def tensor(draw, shapes=None, elements=None, qparams=None):
if isinstance(shapes, SearchStrategy):
_shape = draw(shapes)
else:
_shape = draw(st.sampled_from(shapes))
if qparams is None:
if elements is None:
elements = floats(-1e6, 1e6, allow_nan=False, width=32)
X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
assume(not (np.isnan(X).any() or np.isinf(X).any()))
return X, None
qparams = draw(qparams)
if elements is None:
min_value, max_value = _get_valid_min_max(qparams)
elements = floats(min_value, max_value, allow_infinity=False,
allow_nan=False, width=32)
X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
# Recompute the scale and zero_points according to the X statistics.
scale, zp = _calculate_dynamic_qparams(X, qparams[2])
enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None)
if enforced_zp is not None:
zp = enforced_zp
return X, (scale, zp, qparams[2])
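# Illustrative usage sketch (not part of the original module): the tensor()
# strategy yields a numpy float32 array plus (possibly None) qparams.
def _example_tensor_strategy_usage():
    from hypothesis import given

    @given(X_qp=tensor(shapes=((2, 3),), qparams=qparams()))
    def check(X_qp):
        X, qp = X_qp
        assert X.shape == (2, 3) and X.dtype == np.float32
        scale, zero_point, quantized_type = qp
        assert quantized_type in _ALL_QINT_TYPES
    check()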
@st.composite
def per_channel_tensor(draw, shapes=None, elements=None, qparams=None):
if isinstance(shapes, SearchStrategy):
_shape = draw(shapes)
else:
_shape = draw(st.sampled_from(shapes))
if qparams is None:
if elements is None:
elements = floats(-1e6, 1e6, allow_nan=False, width=32)
X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
assume(not (np.isnan(X).any() or np.isinf(X).any()))
return X, None
qparams = draw(qparams)
if elements is None:
min_value, max_value = _get_valid_min_max(qparams)
elements = floats(min_value, max_value, allow_infinity=False,
allow_nan=False, width=32)
X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
# Recompute the scale and zero_points according to the X statistics.
scale, zp = _calculate_dynamic_per_channel_qparams(X, qparams[2])
enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None)
if enforced_zp is not None:
zp = enforced_zp
# Permute to model quantization along an axis
axis = int(np.random.randint(0, X.ndim, 1))
permute_axes = np.arange(X.ndim)
permute_axes[0] = axis
permute_axes[axis] = 0
X = np.transpose(X, permute_axes)
return X, (scale, zp, axis, qparams[2])
"""Strategy for generating test cases for tensors used in Conv.
The resulting tensors are in float32 format.
Args:
spatial_dim: Spatial Dim for feature maps. If given as an iterable, randomly
picks one from the pool to make it the spatial dimension
batch_size_range: Range to generate `batch_size`.
Must be tuple of `(min, max)`.
input_channels_per_group_range:
Range to generate `input_channels_per_group`.
Must be tuple of `(min, max)`.
output_channels_per_group_range:
Range to generate `output_channels_per_group`.
Must be tuple of `(min, max)`.
feature_map_range: Range to generate feature map size for each spatial_dim.
Must be tuple of `(min, max)`.
kernel_range: Range to generate kernel size for each spatial_dim. Must be
tuple of `(min, max)`.
max_groups: Maximum number of groups to generate.
elements: Elements to generate from for the returned data type.
If None, the strategy resolves to float within range [-1e6, 1e6].
qparams: Strategy for quantization parameters. for X, w, and b.
Could be either a single strategy (used for all) or a list of
three strategies for X, w, b.
Generates:
    (X, W, b, g): Tensors of type `float32` of the following drawn shapes:
        X: `(batch_size, input_channels, H, W)`
        W: `(output_channels, input_channels_per_group) + kernel_shape`
b: `(output_channels,)`
groups: Number of groups the input is divided into
Note: X, W, b are tuples of (Tensor, qparams), where qparams could be either
None or (scale, zero_point, quantized_type)
Example:
@given(tensor_conv(
spatial_dim=2,
batch_size_range=(1, 3),
input_channels_per_group_range=(1, 7),
output_channels_per_group_range=(1, 7),
feature_map_range=(6, 12),
kernel_range=(3, 5),
max_groups=4,
elements=st.floats(-1.0, 1.0),
qparams=qparams()
))
"""
@st.composite
def tensor_conv(
draw, spatial_dim=2, batch_size_range=(1, 4),
input_channels_per_group_range=(3, 7),
output_channels_per_group_range=(3, 7), feature_map_range=(6, 12),
kernel_range=(3, 7), max_groups=1, can_be_transposed=False,
elements=None, qparams=None
):
    # Resolve the minibatch, in_channels, out_channels, iH/iW, kH/kW
batch_size = draw(st.integers(*batch_size_range))
input_channels_per_group = draw(
st.integers(*input_channels_per_group_range))
output_channels_per_group = draw(
st.integers(*output_channels_per_group_range))
groups = draw(st.integers(1, max_groups))
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
if isinstance(spatial_dim, Iterable):
spatial_dim = draw(st.sampled_from(spatial_dim))
feature_map_shape = []
for i in range(spatial_dim):
feature_map_shape.append(draw(st.integers(*feature_map_range)))
kernels = []
for i in range(spatial_dim):
kernels.append(draw(st.integers(*kernel_range)))
tr = False
weight_shape = (output_channels, input_channels_per_group) + tuple(kernels)
bias_shape = output_channels
if can_be_transposed:
tr = draw(st.booleans())
if tr:
weight_shape = (input_channels, output_channels_per_group) + tuple(kernels)
bias_shape = output_channels
# Resolve the tensors
if qparams is not None:
if isinstance(qparams, (list, tuple)):
assert(len(qparams) == 3), "Need 3 qparams for X, w, b"
else:
qparams = [qparams] * 3
X = draw(tensor(shapes=(
(batch_size, input_channels) + tuple(feature_map_shape),),
elements=elements, qparams=qparams[0]))
W = draw(tensor(shapes=(weight_shape,), elements=elements,
qparams=qparams[1]))
b = draw(tensor(shapes=(bias_shape,), elements=elements,
qparams=qparams[2]))
return X, W, b, groups, tr
# We set the deadline in the currently loaded profile.
# Creating (and loading) a separate profile overrides any settings the user
# already specified.
hypothesis_version = hypothesis.version.__version_info__
current_settings = settings._profiles[settings._current_profile].__dict__
current_settings['deadline'] = None
if hypothesis_version >= (3, 16, 0) and hypothesis_version < (5, 0, 0):
current_settings['timeout'] = hypothesis.unlimited
def assert_deadline_disabled():
if hypothesis_version < (3, 27, 0):
import warnings
warning_message = (
"Your version of hypothesis is outdated. "
"To avoid `DeadlineExceeded` errors, please update. "
"Current hypothesis version: {}".format(hypothesis.__version__)
)
warnings.warn(warning_message)
else:
assert settings().deadline is None
| pytorch-master | torch/testing/_internal/hypothesis_utils.py |
r"""This file is allowed to initialize CUDA context when imported."""
import functools
import torch
import torch.cuda
from torch.testing._internal.common_utils import TEST_NUMBA, IS_WINDOWS
import inspect
import contextlib
from distutils.version import LooseVersion
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
CUDA_DEVICE = torch.device("cuda:0") if TEST_CUDA else None
# note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN
TEST_CUDNN = TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE))
TEST_CUDNN_VERSION = torch.backends.cudnn.version() if TEST_CUDNN else 0
CUDA11OrLater = torch.version.cuda and LooseVersion(torch.version.cuda) >= "11.0"
CUDA9 = torch.version.cuda and torch.version.cuda.startswith('9.')
SM53OrLater = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (5, 3)
SM60OrLater = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (6, 0)
SM80OrLater = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0)
TEST_MAGMA = TEST_CUDA
if TEST_CUDA:
torch.ones(1).cuda() # has_magma shows up after cuda is initialized
TEST_MAGMA = torch.cuda.has_magma
if TEST_NUMBA:
import numba.cuda
TEST_NUMBA_CUDA = numba.cuda.is_available()
else:
TEST_NUMBA_CUDA = False
# Used below in `initialize_cuda_context_rng` to ensure that CUDA context and
# RNG have been initialized.
__cuda_ctx_rng_initialized = False
# after this call, CUDA context and RNG must have been initialized on each GPU
def initialize_cuda_context_rng():
global __cuda_ctx_rng_initialized
assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng'
if not __cuda_ctx_rng_initialized:
# initialize cuda context and rng for memory tests
for i in range(torch.cuda.device_count()):
torch.randn(1, device="cuda:{}".format(i))
__cuda_ctx_rng_initialized = True
# Test whether hardware TF32 math mode enabled. It is enabled only on:
# - CUDA >= 11
# - arch >= Ampere
def tf32_is_not_fp32():
if not torch.cuda.is_available() or torch.version.cuda is None:
return False
if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
return False
if int(torch.version.cuda.split('.')[0]) < 11:
return False
return True
@contextlib.contextmanager
def tf32_off():
old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
try:
torch.backends.cuda.matmul.allow_tf32 = False
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
yield
finally:
torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul
@contextlib.contextmanager
def tf32_on(self, tf32_precision=1e-5):
old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
old_precision = self.precision
try:
torch.backends.cuda.matmul.allow_tf32 = True
self.precision = tf32_precision
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
yield
finally:
torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul
self.precision = old_precision
# This is a wrapper that wraps a test to run this test twice, one with
# allow_tf32=True, another with allow_tf32=False. When running with
# allow_tf32=True, it will use reduced precision as specified by the
# argument. For example:
# @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
# @tf32_on_and_off(0.005)
# def test_matmul(self, device, dtype):
# a = ...; b = ...;
# c = torch.matmul(a, b)
# self.assertEqual(c, expected)
# In the above example, when testing torch.float32 and torch.complex64 on CUDA
# on a CUDA >= 11 build on an >= Ampere architecture, the matmul will be run both with
# TF32 mode on and with TF32 mode off; when TF32 mode is on, the assertEqual will use reduced
# precision to check values.
#
# This decorator can be used for function with or without device/dtype, such as
# @tf32_on_and_off(0.005)
# def test_my_op(self)
# @tf32_on_and_off(0.005)
# def test_my_op(self, device)
# @tf32_on_and_off(0.005)
# def test_my_op(self, device, dtype)
# @tf32_on_and_off(0.005)
# def test_my_op(self, dtype)
# if neither device nor dtype is specified, it will check if the system has ampere device
# if device is specified, it will check if device is cuda
# if dtype is specified, it will check if dtype is float32 or complex64
# tf32 and fp32 are different only when all the three checks pass
def tf32_on_and_off(tf32_precision=1e-5):
def with_tf32_disabled(self, function_call):
with tf32_off():
function_call()
def with_tf32_enabled(self, function_call):
with tf32_on(self, tf32_precision):
function_call()
def wrapper(f):
params = inspect.signature(f).parameters
arg_names = tuple(params.keys())
@functools.wraps(f)
def wrapped(*args, **kwargs):
for k, v in zip(arg_names, args):
kwargs[k] = v
cond = tf32_is_not_fp32()
if 'device' in kwargs:
cond = cond and (torch.device(kwargs['device']).type == 'cuda')
if 'dtype' in kwargs:
cond = cond and (kwargs['dtype'] in {torch.float32, torch.complex64})
if cond:
with_tf32_disabled(kwargs['self'], lambda: f(**kwargs))
with_tf32_enabled(kwargs['self'], lambda: f(**kwargs))
else:
f(**kwargs)
return wrapped
return wrapper
# This is a wrapper that wraps a test to run it with TF32 turned off.
# This wrapper is designed to be used when a test uses matmul or convolutions
# but the purpose of that test is not testing matmul or convolutions.
# Disabling TF32 will enforce torch.float tensors to be always computed
# at full precision.
def with_tf32_off(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
with tf32_off():
return f(*args, **kwargs)
return wrapped
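# Illustrative usage sketch (not part of the original module):
#
#     @with_tf32_off
#     def test_uses_matmul_incidentally(self, device):
#         ...   # torch.float32 matmuls run at full fp32 precision here
#
# or, to guard a single region without the decorator:
#
#     with tf32_off():
#         c = a @ b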
def _get_magma_version():
if 'Magma' not in torch.__config__.show():
return (0, 0)
position = torch.__config__.show().find('Magma ')
version_str = torch.__config__.show()[position + len('Magma '):].split('\n')[0]
return tuple(int(x) for x in version_str.split("."))
def _get_torch_cuda_version():
if torch.version.cuda is None:
return (0, 0)
cuda_version = str(torch.version.cuda)
return tuple(int(x) for x in cuda_version.split("."))
def _check_cusparse_generic_available():
version = _get_torch_cuda_version()
min_supported_version = (10, 1)
if IS_WINDOWS:
min_supported_version = (11, 0)
return version >= min_supported_version
TEST_CUSPARSE_GENERIC = _check_cusparse_generic_available()
| pytorch-master | torch/testing/_internal/common_cuda.py |
r"""Importing this file includes common utility methods and base clases for
checking quantization api and properties of resulting modules.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.intrinsic.quantized.dynamic as nniqd
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
from torch.nn.intrinsic import _FusedModule
import torch.distributed as dist
from torch.testing._internal.common_utils import TestCase
from torch.ao.quantization import (
QuantType,
default_dynamic_qat_qconfig,
default_embedding_qat_qconfig,
default_symmetric_qnnpack_qat_qconfig,
)
from torch.quantization import QuantWrapper, QuantStub, DeQuantStub, \
default_qconfig, default_dynamic_qconfig, default_per_channel_qconfig, QConfig, default_observer, default_weight_observer, \
propagate_qconfig_, convert, get_default_qconfig, quantize_dynamic_jit, quantize_jit, float_qparams_weight_only_qconfig, \
get_default_qat_qconfig, PerChannelMinMaxObserver, default_dynamic_quant_observer, quantize
from torch.quantization.quantization_mappings import (
get_default_dynamic_quant_module_mappings,
get_default_qconfig_propagation_list,
get_default_qat_module_mappings,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
)
from torch.jit.mobile import _load_for_lite_interpreter
try:
# graph mode quantization based on fx
from torch.ao.quantization.quantize_fx import (
prepare_fx,
prepare_qat_fx,
convert_fx,
convert_to_reference_fx,
)
from torch.ao.ns.fx.ns_types import NSSingleResultValuesType, NSSubgraph
from torch.fx.graph import Node
from torch.fx import GraphModule
HAS_FX = True
except ImportError:
HAS_FX = False
import copy
import io
import functools
import time
import os
import unittest
import numpy as np
from torch.testing import FileCheck
from typing import Callable, Tuple, Dict, Any, Union, Type, Optional
class NodeSpec:
''' Used for checking GraphModule Node
'''
def __init__(self, op, target):
'''
op: call_function | call_module
target:
for call_function, target would be a function
for call_module, target would be the type of PyTorch module
'''
self.op = op
self.target = target
@classmethod
def call_function(cls, target):
return NodeSpec('call_function', target)
@classmethod
def call_method(cls, target):
return NodeSpec('call_method', target)
@classmethod
def call_module(cls, target):
return NodeSpec('call_module', target)
def __hash__(self):
return hash((self.op, self.target))
def __eq__(self, other):
if not isinstance(other, NodeSpec):
return NotImplemented
return self.op == other.op and self.target == other.target
def __repr__(self):
return repr(self.op) + " " + repr(self.target)
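# Illustrative usage sketch (not part of the original module): NodeSpec values
# are built via the classmethods above and compared against nodes collected
# from a GraphModule, e.g.
#
#     expected_node = NodeSpec.call_function(torch.quantize_per_tensor)
#     expected_node_occurrence = {
#         NodeSpec.call_module(nnq.Linear): 1,
#         NodeSpec.call_method('dequantize'): 1,
#     }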
def test_only_eval_fn(model, calib_data):
r"""
    Default evaluation function: takes a torch.utils.data.Dataset or a list of
    input Tensors and runs the model on the dataset.
"""
for inp in calib_data:
output = model(*inp)
_default_loss_fn = torch.nn.CrossEntropyLoss()
def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn):
r"""
    Default train function: takes a torch.utils.data.Dataset and trains the model
    on the dataset.
"""
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
train_loss, correct, total = 0, 0, 0
for i in range(10):
model.train()
for data, target in train_data:
optimizer.zero_grad()
output = model(data)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(output, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
return train_loss, correct, total
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches):
model.train()
cnt = 0
for image, target in data_loader:
start_time = time.time()
print('.', end='')
cnt += 1
image, target = image.to(device), target.to(device)
output = model(image)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if cnt >= ntrain_batches:
return
return
def ddp_setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=world_size)
def ddp_cleanup():
dist.destroy_process_group()
def run_ddp(rank, world_size, prepared):
ddp_setup(rank, world_size)
prepared.cuda()
prepared = torch.nn.parallel.DistributedDataParallel(prepared, device_ids=[rank])
prepared.to(rank)
model_with_ddp = prepared
optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001)
train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1)
ddp_cleanup()
def convert_dynamic(module):
convert(module, get_default_dynamic_quant_module_mappings(), inplace=True)
def prepare_dynamic(model, qconfig_dict=None):
propagate_qconfig_(model, qconfig_dict)
def _make_conv_test_input(
batch_size, in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, X_scale, X_zero_point, W_scale,
W_zero_point, use_bias, use_channelwise,
):
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
(X_value_min, X_value_max) = (0, 4)
X_init = torch.randint(
X_value_min, X_value_max,
(batch_size, in_channels,) + input_feature_map_size)
X = X_scale * (X_init - X_zero_point).float()
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8)
W_scale = W_scale * out_channels
W_zero_point = W_zero_point * out_channels
    # Resize the W_scale and W_zero_point lists to have exactly out_channels entries
W_scale = W_scale[:out_channels]
W_zero_point = W_zero_point[:out_channels]
# For testing, we use small values for weights and for activations so that
    # no overflow occurs in the vpmaddubsw instruction. If the overflow occurs in the
    # qconv implementation but there is no overflow in the reference, we can't
    # exactly match the results with the reference.
# Please see the comment in qconv implementation file
# aten/src/ATen/native/quantized/cpu/qconv.cpp for more details.
(W_value_min, W_value_max) = (-5, 5)
# The operator expects them in the format
# (out_channels, in_channels/groups,) + kernel_size
W_init = torch.randint(
W_value_min, W_value_max,
(out_channels, in_channels_per_group,) + kernel_size)
b_init = torch.randint(0, 10, (out_channels,))
if use_channelwise:
W_shape = (-1, 1) + (1,) * len(kernel_size)
W_scales_tensor = torch.tensor(W_scale, dtype=torch.float)
W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float)
W = W_scales_tensor.reshape(*W_shape) * (
W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float()
b = X_scale * W_scales_tensor * b_init.float()
W_q = torch.quantize_per_channel(
W, W_scales_tensor.double(), W_zero_points_tensor.long(), 0,
dtype=torch.qint8)
else:
W = W_scale[0] * (W_init - W_zero_point[0]).float()
b = X_scale * W_scale[0] * b_init.float()
W_q = torch.quantize_per_tensor(
W, scale=W_scale[0], zero_point=W_zero_point[0], dtype=torch.qint8)
return (X, X_q, W, W_q, b if use_bias else None)
def skipIfNoFBGEMM(fn):
reason = 'Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs with instruction set support AVX2 or newer.'
if isinstance(fn, type):
if 'fbgemm' not in torch.backends.quantized.supported_engines:
fn.__unittest_skip__ = True
fn.__unittest_skip_why__ = reason
return fn
@functools.wraps(fn)
def wrapper(*args, **kwargs):
if 'fbgemm' not in torch.backends.quantized.supported_engines:
raise unittest.SkipTest(reason)
else:
fn(*args, **kwargs)
return wrapper
def skipIfNoQNNPACK(fn):
reason = 'Quantized operations require QNNPACK.'
if isinstance(fn, type):
if 'qnnpack' not in torch.backends.quantized.supported_engines:
fn.__unittest_skip__ = True
fn.__unittest_skip_why__ = reason
return fn
@functools.wraps(fn)
def wrapper(*args, **kwargs):
if 'qnnpack' not in torch.backends.quantized.supported_engines:
raise unittest.SkipTest(reason)
else:
fn(*args, **kwargs)
return wrapper
# NOTE: the def line of this skip decorator is missing from this extract; the
# name and skip message below are assumed reconstructions, inferred only from
# the torch.onnx._CAFFE2_ATEN_FALLBACK check in the body.
def skipIfNoCaffe2AtenFallback(fn):
    reason = 'Test requires torch.onnx._CAFFE2_ATEN_FALLBACK to be enabled.'
    @functools.wraps(fn)
def wrapper(*args, **kwargs):
if not torch.onnx._CAFFE2_ATEN_FALLBACK:
raise unittest.SkipTest(reason)
else:
fn(*args, **kwargs)
return wrapper
try:
import torchvision # noqa: F401
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skip_if_no_torchvision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
def get_script_module(model, tracing, data):
return torch.jit.trace(model, data) if tracing else torch.jit.script(model)
def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True):
"""
Convert lengths to offsets for embedding_bag
"""
tt = np.zeros((t.shape[0] + 1,), dtype=offset_type)
tt[1:] = t
tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type))
if use_begin_offset:
return tt[:-1]
return tt[1:]
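# Illustrative worked example (not part of the original module): lengths
# [2, 3, 1] yield begin offsets [0, 2, 5], or end offsets [2, 5, 6] when
# use_begin_offset=False.
def _example_lengths_to_offsets():
    lengths = np.array([2, 3, 1])
    begin = lengths_to_offsets(lengths)                        # tensor([0, 2, 5])
    end = lengths_to_offsets(lengths, use_begin_offset=False)  # tensor([2, 5, 6])
    return begin, end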
# QuantizationTestCase used as a base class for testing quantization on modules
class QuantizationTestCase(TestCase):
def setUp(self):
super().setUp()
self.calib_data = [[torch.rand(2, 5, dtype=torch.float)] for _ in range(2)]
self.train_data = [[torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)] for _ in range(2)]
self.img_data_1d = [[torch.rand(2, 3, 10, dtype=torch.float)]
for _ in range(2)]
self.img_data_2d = [[torch.rand(1, 3, 10, 10, dtype=torch.float)]
for _ in range(2)]
self.img_data_3d = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float)]
for _ in range(2)]
self.img_data_1d_train = [[torch.rand(2, 3, 10, dtype=torch.float),
torch.randint(0, 1, (1,), dtype=torch.long)]
for _ in range(2)]
self.img_data_2d_train = [[torch.rand(1, 3, 10, 10, dtype=torch.float),
torch.randint(0, 1, (1,), dtype=torch.long)]
for _ in range(2)]
self.img_data_3d_train = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float),
torch.randint(0, 1, (1,), dtype=torch.long)]
for _ in range(2)]
self.img_data_dict = {1 : self.img_data_1d,
2 : self.img_data_2d,
3 : self.img_data_3d}
# Quant types that produce statically quantized ops
self.static_quant_types = [QuantType.STATIC, QuantType.QAT]
# All quant types for (fx based) graph mode quantization
self.all_quant_types = [QuantType.DYNAMIC, QuantType.STATIC, QuantType.QAT]
def checkNoPrepModules(self, module):
r"""Checks the module does not contain child
modules for quantization prepration, e.g.
quant, dequant and observer
"""
self.assertFalse(hasattr(module, 'quant'))
self.assertFalse(hasattr(module, 'dequant'))
def checkNoQconfig(self, module):
r"""Checks the module does not contain qconfig
"""
self.assertFalse(hasattr(module, 'qconfig'))
for child in module.children():
self.checkNoQconfig(child)
def checkHasPrepModules(self, module):
r"""Checks the module contains child
modules for quantization prepration, e.g.
quant, dequant and observer
"""
self.assertTrue(hasattr(module, 'module'))
self.assertTrue(hasattr(module, 'quant'))
self.assertTrue(hasattr(module, 'dequant'))
def checkObservers(self, module, propagate_qconfig_list=None, prepare_custom_config_dict=None):
r"""Checks the module or module's leaf descendants
        have observers in preparation for quantization
"""
if propagate_qconfig_list is None:
propagate_qconfig_list = get_default_qconfig_propagation_list()
if prepare_custom_config_dict is None:
prepare_custom_config_dict = {}
float_to_observed_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {})
# check if a module is a leaf module, ignoring activation_post_process attribute
def is_leaf_module(module):
submodule_name_count = 0
for name, _ in module.named_children():
if name != 'activation_post_process':
submodule_name_count += 1
return submodule_name_count == 0
if hasattr(module, 'qconfig') and module.qconfig is not None and \
((is_leaf_module(module) and not isinstance(module, torch.nn.Sequential)
and type(module) in propagate_qconfig_list) or
type(module) in float_to_observed_module_class_mapping.keys()) and \
not isinstance(module, torch.quantization.DeQuantStub):
self.assertTrue(hasattr(module, 'activation_post_process'),
                            'module: ' + str(type(module)) + ' does not have an observer')
# we don't need to check observers for child modules of the
# qat modules
if type(module) not in get_default_qat_module_mappings().values() and \
type(module) not in float_to_observed_module_class_mapping.values() and \
not isinstance(module, _FusedModule):
for child in module.children():
if type(child) in [nn.Dropout]:
continue
self.checkObservers(child, propagate_qconfig_list, prepare_custom_config_dict)
def checkQuantDequant(self, mod):
r"""Checks that mod has nn.Quantize and
nn.DeQuantize submodules inserted
"""
self.assertEqual(type(mod.quant), nnq.Quantize)
self.assertEqual(type(mod.dequant), nnq.DeQuantize)
def checkWrappedQuantizedLinear(self, mod):
r"""Checks that mod has been swapped for an nnq.Linear
module, the bias is qint32, and that the module
has Quantize and DeQuantize submodules
"""
self.assertEqual(type(mod.module), nnq.Linear)
self.checkQuantDequant(mod)
def checkQuantizedLinear(self, mod):
self.assertEqual(type(mod), nnq.Linear)
def checkDynamicQuantizedLinear(self, mod, dtype):
r"""Checks that mod has been swapped for an nnqd.Linear
module, the bias is float.
"""
self.assertEqual(type(mod), nnqd.Linear)
self.assertEqual(mod._packed_params.dtype, dtype)
def checkDynamicQuantizedLinearRelu(self, mod, dtype):
r"""Checks that mod has been swapped for an nnqd.Linear
module, the bias is float.
"""
self.assertEqual(type(mod), nniqd.LinearReLU)
self.assertEqual(mod._packed_params.dtype, dtype)
def check_eager_serialization(self, ref_model, loaded_model, x):
# Check state dict serialization and torch.save APIs
model_dict = ref_model.state_dict()
b = io.BytesIO()
torch.save(model_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
loaded_model.load_state_dict(loaded_dict)
ref_out = ref_model(*x)
load_out = loaded_model(*x)
def check_outputs(ref_out, load_out):
self.assertEqual(ref_out[0], load_out[0])
if isinstance(ref_out[1], tuple):
self.assertEqual(ref_out[1][0], load_out[1][0])
self.assertEqual(ref_out[1][1], load_out[1][1])
else:
self.assertEqual(ref_out[1], load_out[1])
check_outputs(ref_out, load_out)
b = io.BytesIO()
torch.save(ref_model, b)
b.seek(0)
loaded = torch.load(b)
load_out = loaded(*x)
check_outputs(ref_out, load_out)
def check_weight_bias_api(self, ref_model, weight_keys, bias_keys):
weight = ref_model.get_weight()
bias = ref_model.get_bias()
self.assertEqual(weight_keys ^ weight.keys(), set())
self.assertEqual(bias_keys ^ bias.keys(), set())
def checkDynamicQuantizedLSTM(self, mod, reference_module_type, dtype):
r"""Checks that mod has been swapped for an nnqd.LSTM type
module, the bias is float.
"""
wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'}
self.assertEqual(type(mod), reference_module_type)
for packed_params in mod._all_weight_values:
self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype])
def checkLinear(self, mod):
self.assertEqual(type(mod), torch.nn.Linear)
def checkDynamicQuantizedModule(self, mod, reference_module_type, dtype):
r"""Checks that mod has been swapped for an nnqd.Linear
module, the bias is float.
"""
wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'}
self.assertEqual(type(mod), reference_module_type)
if hasattr(mod, '_all_weight_values'):
for packed_params in mod._all_weight_values:
self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype])
def checkScriptable(self, orig_mod, calib_data, check_save_load=False):
scripted = torch.jit.script(orig_mod)
self._checkScriptable(orig_mod, scripted, calib_data, check_save_load)
# Use first calib_data entry as trace input
traced = torch.jit.trace(orig_mod, calib_data[0])
self._checkScriptable(orig_mod, traced, calib_data, check_save_load)
# Call this twice: once for a scripted module and once for a traced module
def _checkScriptable(self, orig_mod, script_mod, calib_data, check_save_load):
self._checkModuleCorrectnessAgainstOrig(orig_mod, script_mod, calib_data)
# Test save/load
buffer = io.BytesIO()
torch.jit.save(script_mod, buffer)
buffer.seek(0)
loaded_mod = torch.jit.load(buffer)
        # Pending __getstate__ and __setstate__ support
# See tracking task https://github.com/pytorch/pytorch/issues/23984
if check_save_load:
self._checkModuleCorrectnessAgainstOrig(orig_mod, loaded_mod, calib_data)
def _checkModuleCorrectnessAgainstOrig(self, orig_mod, test_mod, calib_data):
for inp in calib_data:
ref_output = orig_mod(*inp)
scripted_output = test_mod(*inp)
self.assertEqual(scripted_output, ref_output)
def checkGraphModeOp(self, module, inputs, quantized_op, tracing=False, debug=False,
check=True, eval_mode=True, dynamic=False, qconfig=None):
if debug:
print('Testing:', str(module))
qconfig_dict = {'': get_default_qconfig(torch.backends.quantized.engine)}
if eval_mode:
module = module.eval()
if dynamic:
qconfig_dict = {'': default_dynamic_qconfig if qconfig is None else qconfig}
model = get_script_module(module, tracing, inputs[0]).eval()
if debug:
print('input graph:', model.graph)
models = {}
outputs = {}
for debug in [True, False]:
if dynamic:
models[debug] = quantize_dynamic_jit(model, qconfig_dict, debug=debug)
# make sure it runs
outputs[debug] = models[debug](inputs)
else:
# module under test can contain in-place ops, and we depend on
# input data staying constant for comparisons
inputs_copy = copy.deepcopy(inputs)
models[debug] = quantize_jit(
model, qconfig_dict, test_only_eval_fn, [inputs_copy], inplace=False,
debug=debug)
# make sure it runs
outputs[debug] = models[debug](*inputs[0])
if debug:
print('debug graph:', models[True].graph)
print('non debug graph:', models[False].graph)
if check:
# debug and non-debug option should have the same numerics
self.assertEqual(outputs[True], outputs[False])
# non debug graph should produce quantized op
FileCheck().check(quantized_op) \
.run(models[False].graph)
return models[False]
def checkGraphModuleNodes(
self, graph_module,
expected_node=None,
expected_node_occurrence=None,
expected_node_list=None):
""" Check if GraphModule contains the target node
Args:
graph_module: the GraphModule instance we want to check
expected_node, expected_node_occurrence, expected_node_list:
see docs for checkGraphModeFxOp
"""
nodes_in_graph = dict()
node_list = []
modules = dict(graph_module.named_modules(remove_duplicate=False))
for node in graph_module.graph.nodes:
n = None
if node.op == 'call_function' or node.op == 'call_method':
n = NodeSpec(node.op, node.target)
elif node.op == 'call_module':
n = NodeSpec(node.op, type(modules[node.target]))
if n is not None:
node_list.append(n)
if n in nodes_in_graph:
nodes_in_graph[n] += 1
else:
nodes_in_graph[n] = 1
if expected_node is not None:
self.assertTrue(expected_node in nodes_in_graph, 'node:' + str(expected_node) +
' not found in the graph module')
if expected_node_occurrence is not None:
for expected_node, occurrence in expected_node_occurrence.items():
if occurrence != 0:
self.assertTrue(
expected_node in nodes_in_graph,
'Check failed for node:' + str(expected_node) +
' not found')
self.assertTrue(
nodes_in_graph[expected_node] == occurrence,
'Check failed for node:' + str(expected_node) +
' Expected occurrence:' + str(occurrence) +
' Found occurrence:' + str(nodes_in_graph[expected_node]))
else:
self.assertTrue(
expected_node not in nodes_in_graph,
'Check failed for node:' + str(expected_node) +
' expected no occurrence but found')
if expected_node_list is not None:
cur_index = 0
for n in node_list:
if cur_index == len(expected_node_list):
return
if n == expected_node_list[cur_index]:
cur_index += 1
self.assertTrue(
cur_index == len(expected_node_list),
"Check failed for graph:" +
self.printGraphModule(graph_module, print_str=False) +
"Expected ordered list:" +
str(expected_node_list))
def printGraphModule(self, graph_module, print_str=True):
modules = dict(graph_module.named_modules(remove_duplicate=False))
node_infos = []
for n in graph_module.graph.nodes:
node_info = ' '.join(map(repr, [n.op, n.name, n.target, n.args, n.kwargs]))
if n.op == 'call_module':
node_info += ' module type: ' + repr(type(modules[n.target]))
node_infos.append(node_info)
str_to_print = '\n'.join(node_infos)
if print_str:
print(str_to_print)
return str_to_print
if HAS_FX:
def assert_types_for_matched_subgraph_pairs(
self,
matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]],
expected_types: Dict[str, Tuple[Tuple[Callable, Callable], Tuple[Callable, Callable]]],
gm_a: GraphModule,
gm_b: GraphModule,
) -> None:
"""
Verifies that the types specified in expected_types match
the underlying objects pointed to by the nodes in matched_subgraph_pairs.
An example successful test case:
matched_subgraph_pairs = {'x0': (graph_a_conv_0_node, graph_b_conv_0_node)}
expected_types = {'x0': (nn.Conv2d, nnq.Conv2d)}
The function tests for key equivalence, and verifies types with
instance checks.
"""
def _get_underlying_op_type(
node: Node, gm: GraphModule
) -> Union[Callable, str]:
if node.op == 'call_module':
mod = getattr(gm, node.target)
return type(mod)
else:
assert node.op in ('call_function', 'call_method')
return node.target
self.assertTrue(
len(matched_subgraph_pairs) == len(expected_types),
'Expected length of results to match, but got %d and %d' %
(len(matched_subgraph_pairs), len(expected_types))
)
for k, v in expected_types.items():
expected_types_a, expected_types_b = v
exp_type_start_a, exp_type_end_a = expected_types_a
exp_type_start_b, exp_type_end_b = expected_types_b
subgraph_a, subgraph_b = matched_subgraph_pairs[k]
act_type_start_a = _get_underlying_op_type(subgraph_a.start_node, gm_a)
act_type_start_b = _get_underlying_op_type(subgraph_b.start_node, gm_b)
act_type_end_a = _get_underlying_op_type(subgraph_a.end_node, gm_a)
act_type_end_b = _get_underlying_op_type(subgraph_b.end_node, gm_b)
types_match = (exp_type_start_a is act_type_start_a) and \
(exp_type_end_a is act_type_end_a) and \
(exp_type_start_b is act_type_start_b) and \
(exp_type_end_b is act_type_end_b)
self.assertTrue(
types_match,
'Type mismatch at %s: expected %s, got %s' %
(k, (exp_type_start_a, exp_type_end_a, exp_type_start_b, exp_type_end_b),
(act_type_start_a, act_type_end_a, act_type_start_b, act_type_end_b))
)
def assert_ns_compare_dict_valid(
self,
act_compare_dict: Dict[str, Dict[str, Dict[str, Any]]],
) -> None:
"""
Verifies that the act_compare_dict (output of Numeric Suite APIs) is valid:
1. for each layer, results are recorded for two models
2. number of seen tensors match
3. shapes of each pair of seen tensors match
"""
for layer_name, result_type_to_data in act_compare_dict.items():
for result_type, layer_data in result_type_to_data.items():
self.assertTrue(
len(layer_data) == 2,
f"Layer {layer_name} does not have exactly two model results.")
model_name_0, model_name_1 = layer_data.keys()
for res_idx in range(len(layer_data[model_name_0])):
layer_data_0 = layer_data[model_name_0][res_idx]
layer_data_1 = layer_data[model_name_1][res_idx]
self.assertTrue(
                            layer_data_0['type'] == layer_data_1['type'],
f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same type.")
self.assertTrue(
len(layer_data_0['values']) ==
len(layer_data_1['values']),
f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same number of seen Tensors.")
# F.conv1d weight has rank 3, and toq.conv1d unpacked weight
# has rank 4. For now, skip the length check for conv1d only.
is_weight_functional_conv1d = (
result_type == NSSingleResultValuesType.WEIGHT.value and
(
'conv1d' in layer_data_0['prev_node_target_type'] or
'conv1d' in layer_data_1['prev_node_target_type']
)
)
if not is_weight_functional_conv1d:
for idx in range(len(layer_data_0['values'])):
values_0 = layer_data_0['values'][idx]
values_1 = layer_data_1['values'][idx]
if isinstance(values_0, torch.Tensor):
self.assertTrue(
values_0.shape == values_1.shape,
f"Layer {layer_name}, {model_name_0} and {model_name_1} " +
f"have a shape mismatch at idx {idx}.")
elif isinstance(values_0, list):
values_0 = values_0[0]
values_1 = values_1[0]
self.assertTrue(
values_0.shape == values_1.shape,
f"Layer {layer_name}, {model_name_0} and {model_name_1} " +
f"have a shape mismatch at idx {idx}.")
else:
assert isinstance(values_0, tuple), \
f"unhandled type {type(values_0)}"
assert len(values_0) == 2
assert len(values_0[1]) == 2
assert values_0[0].shape == values_1[0].shape
assert values_0[1][0].shape == values_1[1][0].shape
assert values_0[1][1].shape == values_1[1][1].shape
# verify that ref_node_name is valid
ref_node_name_0 = layer_data_0['ref_node_name']
ref_node_name_1 = layer_data_1['ref_node_name']
prev_node_name_0 = layer_data_0['prev_node_name']
prev_node_name_1 = layer_data_1['prev_node_name']
if layer_data_0['type'] == NSSingleResultValuesType.NODE_OUTPUT.value:
self.assertTrue(ref_node_name_0 == prev_node_name_0)
self.assertTrue(ref_node_name_1 == prev_node_name_1)
elif layer_data_0['type'] == NSSingleResultValuesType.NODE_INPUT.value:
self.assertTrue(ref_node_name_0 != prev_node_name_0)
self.assertTrue(ref_node_name_1 != prev_node_name_1)
def checkGraphModeFxOp(
self,
model,
inputs,
quant_type,
expected_node=None,
expected_node_occurrence=None,
expected_node_list=None,
is_reference=False,
print_debug_info=False,
custom_qconfig_dict=None,
prepare_expected_node=None,
prepare_expected_node_occurrence=None,
prepare_expected_node_list=None,
prepare_custom_config=None,
backend_config=None):
""" Quantizes model with graph mode quantization on fx and check if the
quantized model contains the quantized_node
Args:
model: floating point torch.nn.Module
inputs: one positional sample input arguments for model
expected_node: NodeSpec
e.g. NodeSpec.call_function(torch.quantize_per_tensor)
expected_node_occurrence: a dict from NodeSpec to
                expected number of occurrences (int)
e.g. {NodeSpec.call_function(torch.quantize_per_tensor) : 1,
NodeSpec.call_method('dequantize'): 1}
expected_node_list: a list of NodeSpec, used to check the order
of the occurrence of Node
e.g. [NodeSpec.call_function(torch.quantize_per_tensor),
NodeSpec.call_module(nnq.Conv2d),
NodeSpec.call_function(F.hardtanh_),
NodeSpec.call_method('dequantize')]
is_reference: if True, enables reference mode
print_debug_info: if True, prints debug info
custom_qconfig_dict: overrides default qconfig_dict
prepare_expected_node: same as expected_node, but for prepare
prepare_expected_node_occurrence: same as
expected_node_occurrence, but for prepare
prepare_expected_node_list: same as expected_node_list, but
for prepare
Returns:
A dictionary with the following structure:
{
"prepared": ..., # the prepared model
"quantized": ..., # the quantized non-reference model
"quantized_reference": ..., # the quantized reference model
"result": ..., # the result for either quantized or
# quantized_reference model depending on the
                               # is_reference argument
}
"""
# TODO: make img_data a single example instead of a list
if type(inputs) == list:
inputs = inputs[0]
if quant_type == QuantType.QAT:
qconfig = get_default_qat_qconfig(torch.backends.quantized.engine)
model.train()
elif quant_type == QuantType.STATIC:
qconfig = get_default_qconfig(torch.backends.quantized.engine)
model.eval()
else:
qconfig = default_dynamic_qconfig
model.eval()
if quant_type == QuantType.QAT:
prepare = prepare_qat_fx
else:
prepare = prepare_fx
qconfig_dict = {"": qconfig}
# overwrite qconfig_dict with custom_qconfig_dict
if custom_qconfig_dict is not None:
qconfig_dict = custom_qconfig_dict
prepared = prepare(
model, qconfig_dict,
example_inputs=inputs,
prepare_custom_config=prepare_custom_config,
backend_config=backend_config)
if not quant_type == QuantType.DYNAMIC:
prepared(*inputs)
if print_debug_info:
print()
print('quant type:\n', quant_type)
print('original model:\n', model)
print()
print('prepared model:\n', prepared)
self.checkGraphModuleNodes(
prepared, prepare_expected_node,
prepare_expected_node_occurrence, prepare_expected_node_list)
prepared_copy = copy.deepcopy(prepared)
qgraph = convert_fx(copy.deepcopy(prepared))
qgraph_reference = convert_to_reference_fx(copy.deepcopy(prepared))
result = qgraph(*inputs)
result_reference = qgraph_reference(*inputs)
qgraph_copy = copy.deepcopy(qgraph)
qgraph_reference_copy = copy.deepcopy(qgraph_reference)
qgraph_to_check = qgraph_reference if is_reference else qgraph
if print_debug_info:
print()
print('quantized model:\n', qgraph_to_check)
self.printGraphModule(qgraph_to_check)
print()
self.checkGraphModuleNodes(
qgraph_to_check, expected_node, expected_node_occurrence, expected_node_list)
return {"prepared": prepared_copy,
"quantized": qgraph_copy,
"quantized_reference": qgraph_reference_copy,
"quantized_output": result,
"quantized_reference_output": result_reference}
def checkEmbeddingSerialization(self, qemb, num_embeddings, embedding_dim, indices, offsets,
set_qconfig, is_emb_bag, dtype=torch.quint8):
        # Test serialization of the quantized Embedding/EmbeddingBag module using state_dict
if is_emb_bag:
inputs = [indices, offsets]
else:
inputs = [indices]
emb_dict = qemb.state_dict()
b = io.BytesIO()
torch.save(emb_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
embedding_unpack = torch.ops.quantized.embedding_bag_unpack
# Check unpacked weight values explicitly
for key in emb_dict:
if isinstance(emb_dict[key], torch._C.ScriptObject):
assert isinstance(loaded_dict[key], torch._C.ScriptObject)
emb_weight = embedding_unpack(emb_dict[key])
loaded_weight = embedding_unpack(loaded_dict[key])
self.assertEqual(emb_weight, loaded_weight)
# Check state dict serialization and torch.save APIs
if is_emb_bag:
loaded_qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
include_last_offset=True, mode='sum', dtype=dtype)
else:
loaded_qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype)
self.check_eager_serialization(qemb, loaded_qemb, inputs)
loaded_qemb.load_state_dict(loaded_dict)
self.assertEqual(embedding_unpack(qemb._packed_params._packed_weight),
embedding_unpack(loaded_qemb._packed_params._packed_weight))
# Test JIT serialization
self.checkScriptable(qemb, [inputs], check_save_load=True)
# Test from_float call
if is_emb_bag:
float_embedding = torch.nn.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
include_last_offset=True, scale_grad_by_freq=False, mode='sum')
else:
float_embedding = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim)
if set_qconfig:
float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,
qscheme=torch.per_channel_affine_float_qparams,
ch_axis=0)
float_embedding.qconfig = QConfig(activation=default_dynamic_quant_observer,
weight=float_qparams_observer)
prepare_dynamic(float_embedding)
float_embedding(*inputs)
if is_emb_bag:
q_embeddingbag = nnq.EmbeddingBag.from_float(float_embedding)
expected_name = "QuantizedEmbeddingBag"
else:
q_embeddingbag = nnq.Embedding.from_float(float_embedding)
expected_name = "QuantizedEmbedding"
q_embeddingbag(*inputs)
self.assertTrue(expected_name in str(q_embeddingbag))
class QuantizationLiteTestCase(QuantizationTestCase):
def setUp(self):
super().setUp()
def _create_quantized_model(self, model_class: Type[torch.nn.Module], **kwargs):
        # Creates a quantized model for testing mobile script modules
qengine = "qnnpack"
with override_quantized_engine(qengine):
qconfig = torch.quantization.get_default_qconfig(qengine)
model = model_class(**kwargs)
model = quantize(model, test_only_eval_fn, [self.calib_data])
return model
def _compare_script_and_mobile(self,
model: torch.nn.Module,
input: torch.Tensor):
# Compares the numerical outputs for script and lite modules
qengine = "qnnpack"
with override_quantized_engine(qengine):
script_module = torch.jit.script(model)
script_module_result = script_module(input)
max_retry = 5
for retry in range(1, max_retry + 1):
                # retry up to `max_retry` times; break on success, re-raise the assertion on the final attempt
try:
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
mobile_module_result = mobile_module(input)
torch.testing.assert_close(script_module_result, mobile_module_result)
mobile_module_forward_result = mobile_module.forward(input)
torch.testing.assert_close(script_module_result, mobile_module_forward_result)
mobile_module_run_method_result = mobile_module.run_method("forward", input)
torch.testing.assert_close(script_module_result, mobile_module_run_method_result)
except AssertionError as e:
if retry == max_retry:
raise e
else:
continue
break
# Below are a series of toy models to use in testing quantization
class SingleLayerLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
class AnnotatedSingleLayerLinearModel(torch.nn.Module):
def __init__(self, qengine='fbgemm'):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
def forward(self, x):
x = self.fc1(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
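# Illustrative sketch (hypothetical helper, not used by the test suite): one
# way the annotated toy model above is typically quantized post-training in
# eager mode. The engine string and the single calibration pass are assumptions.
def _example_eager_static_quantization():
    model = AnnotatedSingleLayerLinearModel(qengine='fbgemm').eval()
    prepared = torch.quantization.prepare(model)   # insert observers
    prepared(*model.get_example_inputs())          # calibrate on sample data
    return torch.quantization.convert(prepared)    # swap in quantized modules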
class SingleLayerLinearDynamicModel(torch.nn.Module):
def __init__(self, qengine='fbgemm'):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
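# Illustrative sketch (hypothetical helper): the dynamic toy model above is
# usually quantized post-training with quantize_dynamic, which replaces the
# Linear module with its dynamically quantized counterpart.
def _example_dynamic_quantization():
    model = SingleLayerLinearDynamicModel().eval()
    quantized = torch.quantization.quantize_dynamic(
        model, {torch.nn.Linear}, dtype=torch.qint8)
    return quantized(*model.get_example_inputs())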
class LinearAddModel(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = torch.add(x, 5)
x = self.fc2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
class RNNDynamicModel(torch.nn.Module):
def __init__(self, mod_type):
super().__init__()
self.qconfig = default_dynamic_qconfig
if mod_type == 'GRU':
self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float)
if mod_type == 'LSTM':
self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float)
def forward(self, x):
x = self.mod(x)
return x
class RNNCellDynamicModel(torch.nn.Module):
def __init__(self, mod_type):
super().__init__()
self.qconfig = default_dynamic_qconfig
if mod_type == 'GRUCell':
self.mod = torch.nn.GRUCell(2, 2).to(dtype=torch.float)
if mod_type == 'LSTMCell':
self.mod = torch.nn.LSTMCell(2, 2).to(dtype=torch.float)
if mod_type == 'RNNReLU':
self.mod = torch.nn.RNNCell(2, 2, nonlinearity='relu').to(dtype=torch.float)
if mod_type == 'RNNTanh':
self.mod = torch.nn.RNNCell(2, 2, nonlinearity='tanh').to(dtype=torch.float)
def forward(self, x):
x = self.mod(x)
return x
class LSTMwithHiddenDynamicModel(torch.nn.Module):
def __init__(self, qengine='fbgemm'):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.lstm = torch.nn.LSTM(2, 2).to(dtype=torch.float)
def forward(self, x, hid):
x, hid = self.lstm(x, hid)
return x, hid
class ConvModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
def forward(self, x):
x = self.conv(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class ConvTransposeModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)
def forward(self, x):
x = self.conv(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class AnnotatedConvModel(torch.nn.Module):
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.dequant(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class AnnotatedConvTransposeModel(torch.nn.Module):
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.dequant(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class ConvBnModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class AnnotatedConvBnModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.qconfig = default_qconfig
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.bn(x)
x = self.dequant(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class ConvBnReLUModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class AnnotatedConvBnReLUModel(torch.nn.Module):
def __init__(self, qengine='fbgemm'):
        super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
self.relu = nn.ReLU(inplace=True)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.dequant(x)
return x
def fuse_model(self):
        # TODO: remove this check and define two fuse_modules functions on this module
if self.training:
torch.quantization.fuse_modules_qat(self, [['conv', 'bn', 'relu']], inplace=True)
else:
torch.quantization.fuse_modules(self, [['conv', 'bn', 'relu']], inplace=True)
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
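# Illustrative sketch (hypothetical helper): the intended QAT flow for the
# annotated model above: fuse conv/bn/relu, prepare for QAT, run training
# (a single forward pass stands in for a real training loop here), then convert.
def _example_qat_flow_conv_bn_relu():
    model = AnnotatedConvBnReLUModel(qengine='fbgemm')
    model.train()
    model.fuse_model()                               # fuses to an intrinsic ConvBnReLU2d
    prepared = torch.quantization.prepare_qat(model)
    prepared(*model.get_example_inputs())            # placeholder for a training loop
    return torch.quantization.convert(prepared.eval())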
class TwoLayerConvModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.conv2 = torch.nn.Conv2d(5, 5, 1, bias=False).to(dtype=torch.float)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class TwoLayerLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
class LinearModelWithSubmodule(nn.Module):
def __init__(self):
        super().__init__()
self.subm = TwoLayerLinearModel()
self.fc = nn.Linear(5, 5)
def forward(self, x):
x = self.subm(x)
x = self.fc(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.subm.get_example_inputs()
class AnnotatedTwoLayerLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float))
self.fc2.qconfig = torch.quantization.get_default_qconfig("fbgemm")
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
class ActivationsTestModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig("fbgemm")
self.quant = torch.quantization.QuantStub()
self.hardswish = torch.nn.Hardswish().to(dtype=torch.float)
self.elu = torch.nn.ELU().to(dtype=torch.float)
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.hardswish(x)
x = self.elu(x)
x = self.dequant(x)
return x
class LinearReluModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.relu(self.fc(x))
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
class LinearReluLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
class LinearReluAddModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = torch.add(x, 5)
x = self.fc2(x)
self.relu = torch.nn.ReLU()
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
# TODO: self.fc should be self.conv
class ConvReluModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.relu(self.fc(x))
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
# TODO: self.fc should be self.conv
class ConvReluConvModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
# TODO: self.fc should be self.conv
class ConvReluAddModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = torch.add(x, 5)
x = self.fc2(x)
self.relu = torch.nn.ReLU()
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class NormalizationTestModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.quantization.QuantStub()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.layer_norm = torch.nn.LayerNorm((8))
self.group_norm = torch.nn.GroupNorm(2, 8)
self.instance_norm1d = torch.nn.InstanceNorm1d(8)
self.instance_norm2d = torch.nn.InstanceNorm2d(8)
self.instance_norm3d = torch.nn.InstanceNorm3d(8)
def forward(self, x):
x = self.quant(x)
x = self.fc1(x)
x = self.layer_norm(x)
x = self.group_norm(x.unsqueeze(-1).repeat(1, 1, 3))
x = self.instance_norm1d(x)
x = self.instance_norm2d(x.unsqueeze(-1))
x = self.instance_norm3d(x.unsqueeze(-1))
return x
class NestedModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = TwoLayerLinearModel()
self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class AnnotatedNestedModel(torch.nn.Module):
def __init__(self, qengine):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = TwoLayerLinearModel()
self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
self.fc3.qconfig = default_qconfig
self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
if qengine == 'fbgemm':
self.sub2.fc1.qconfig = default_per_channel_qconfig
else:
self.sub2.fc1.qconfig = default_qconfig
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class AnnotatedSubNestedModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = QuantWrapper(TwoLayerLinearModel())
self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
self.fc3.qconfig = default_qconfig
self.sub2.qconfig = default_qconfig
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class AnnotatedCustomConfigNestedModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = TwoLayerLinearModel()
self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
self.fc3.qconfig = default_qconfig
self.sub2.qconfig = default_qconfig
custom_options = {
'dtype': torch.quint8,
'qscheme': torch.per_tensor_affine
}
custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options),
weight=default_weight_observer)
self.sub2.fc1.qconfig = custom_qconfig
self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
self.sub2.fc2 = QuantWrapper(self.sub2.fc2)
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class QuantSubModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = QuantWrapper(TwoLayerLinearModel())
self.sub2.qconfig = default_qconfig
self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.fc3.qconfig = default_qconfig
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class InnerModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.relu1 = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)
self.relu2 = torch.nn.ReLU()
def forward(self, x):
return self.relu2(self.fc2(self.relu1(self.fc1(x))))
def fuse_modules(self):
fusable_layers = []
named_children = list(self.named_children())
for idx, (current_name, layer) in enumerate(named_children):
if isinstance(layer, torch.nn.Linear):
if idx >= len(named_children) - 1:
break
if isinstance(named_children[idx + 1][1], torch.nn.ReLU):
fusable_layers.append([current_name,
named_children[idx + 1][0]])
        # TODO: remove this check and define two fuse_modules functions on this module
if self.training:
torch.ao.quantization.fuse_modules_qat(self, fusable_layers, inplace=True)
else:
torch.ao.quantization.fuse_modules(self, fusable_layers, inplace=True)
class FunctionalLinear(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.rand((5, 5))
self.bias = torch.zeros(5)
def forward(self, x):
return F.linear(x, self.weight, self.bias)
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 5),)
class SingleLayerFunctionalLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = FunctionalLinear()
def forward(self, x):
x = self.linear1(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.linear1.get_example_inputs()
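# Illustrative sketch (hypothetical helper): functional ops such as F.linear are
# quantized through the FX graph mode workflow rather than eager mode; this
# mirrors how the graph mode checker earlier in this file drives
# prepare_fx/convert_fx.
def _example_fx_quantize_functional_linear():
    model = SingleLayerFunctionalLinearModel().eval()
    example_inputs = model.get_example_inputs()
    qconfig_dict = {"": torch.quantization.get_default_qconfig("fbgemm")}
    prepared = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
    prepared(*example_inputs)                        # calibration pass
    return convert_fx(prepared)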
class TwoLayerFunctionalLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = FunctionalLinear()
self.linear2 = FunctionalLinear()
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.linear1.get_example_inputs()
class FunctionalLinearAddModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = FunctionalLinear()
self.linear2 = FunctionalLinear()
def forward(self, x):
x = self.linear1(x)
x = torch.add(x, 5)
x = self.linear2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.linear1.get_example_inputs()
class FunctionalLinearReluModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = FunctionalLinear()
def forward(self, x):
x = self.linear(x)
x = F.relu(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.linear.get_example_inputs()
class FunctionalLinearReluLinearModel(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = FunctionalLinear()
self.relu = nn.ReLU()
self.linear2 = FunctionalLinear()
def forward(self, x):
x = self.linear1(x)
x = self.relu(x)
x = self.linear2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.linear1.get_example_inputs()
class FunctionalConv2d(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.rand(3, 3, 3, 3)
self.bias = torch.rand(3)
self.stride = (1, 1)
self.padding = (0, 0)
self.dilation = (1, 1)
self.groups = 1
def forward(self, x):
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def get_example_inputs(self) -> Tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
class SingleLayerFunctionalConvModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = FunctionalConv2d()
def forward(self, x):
x = self.conv1(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.conv1.get_example_inputs()
class TwoLayerFunctionalConvModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = FunctionalConv2d()
self.conv2 = FunctionalConv2d()
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.conv1.get_example_inputs()
class FunctionalConvReluModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = FunctionalConv2d()
def forward(self, x):
x = self.conv(x)
x = F.relu(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.conv.get_example_inputs()
class FunctionalConvReluConvModel(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = FunctionalConv2d()
self.relu = nn.ReLU()
self.conv2 = FunctionalConv2d()
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
return x
def get_example_inputs(self) -> Tuple[Any, ...]:
return self.conv1.get_example_inputs()
class SkipQuantModel(torch.nn.Module):
r"""We can skip quantization by explicitly
    setting the qconfig of a submodule to None
"""
def __init__(self):
super().__init__()
self.sub = InnerModule()
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
return self.fc(self.sub(x))
def fuse_modules(self):
self.sub.fuse_modules()
class AnnotatedSkipQuantModel(torch.nn.Module):
r"""We can skip quantization by explicitly
    setting the qconfig of a submodule to None
"""
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.sub = QuantWrapper(InnerModule())
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
# don't quantize this fc
self.fc.qconfig = None
def forward(self, x):
return self.fc(self.sub(x))
def fuse_modules(self):
self.sub.module.fuse_modules()
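# Illustrative sketch (hypothetical helper): because `fc.qconfig` is set to None
# above, eager prepare/convert quantizes only the wrapped submodule and leaves
# `fc` as a regular float Linear.
def _example_skip_quantization():
    model = AnnotatedSkipQuantModel(qengine='fbgemm').eval()
    model.fuse_modules()
    prepared = torch.quantization.prepare(model)
    prepared(torch.rand(1, 5))                       # calibration pass
    quantized = torch.quantization.convert(prepared)
    assert type(quantized.fc) is torch.nn.Linear     # fc was skipped
    return quantized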
class QuantStubModel(torch.nn.Module):
r"""A Module with manually inserted `QuantStub` and `DeQuantStub`
"""
def __init__(self):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig("qnnpack")
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.quant(x)
x = self.fc(x)
return self.dequant(x)
class ManualLinearQATModel(torch.nn.Module):
r"""A Module with manually inserted `QuantStub` and `DeQuantStub`
"""
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.quantization.get_default_qat_qconfig(qengine)
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float)
self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float)
def forward(self, x):
x = self.quant(x)
x = self.fc1(x)
x = self.fc2(x)
return self.dequant(x)
class ManualDropoutQATModel(torch.nn.Module):
r"""A Module with manually inserted `QuantStub` and `DeQuantStub`
"""
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.quantization.get_default_qat_qconfig(qengine)
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float)
self.dropout = torch.nn.Dropout(0.5)
def forward(self, x):
x = self.quant(x)
x = self.fc1(x)
x = self.dropout(x)
return self.dequant(x)
class ManualLinearDynamicQATModel(torch.nn.Module):
r"""A Module that uses a dynamic QAT by default.
"""
def __init__(self, qconfig=None):
super().__init__()
self.qconfig = qconfig or default_dynamic_qat_qconfig
self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float)
self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
class ManualConvLinearQATModel(torch.nn.Module):
r"""A module with manually inserted `QuantStub` and `DeQuantStub`
and contains both linear and conv modules
"""
def __init__(self, qconfig=None):
super().__init__()
self.qconfig = qconfig if qconfig else torch.quantization.get_default_qat_qconfig("qnnpack")
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float)
self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float)
self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float)
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = x.view(-1, 64).contiguous()
x = self.fc1(x)
x = self.fc2(x)
return self.dequant(x)
class ManualConvLinearSymmQATModel(ManualConvLinearQATModel):
r"""Same as ManualConvLinearQATModule but with Symmetric Quantization.
Supported only with qnnpack.
"""
def __init__(self):
super().__init__(default_symmetric_qnnpack_qat_qconfig)
class ManualEmbeddingBagLinear(nn.Module):
def __init__(self):
        super().__init__()
self.emb = nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, mode='sum')
self.emb.qconfig = default_embedding_qat_qconfig
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.linear = nn.Linear(12, 1).to(dtype=torch.float)
self.qconfig = get_default_qat_qconfig("qnnpack")
def forward(self, input: torch.Tensor, offsets: Optional[torch.Tensor] = None,
per_sample_weights: Optional[torch.Tensor] = None):
x = self.emb(input, offsets, per_sample_weights)
x = self.quant(x)
x = self.linear(x)
return self.dequant(x)
class DeFusedEmbeddingBagLinear(nn.Module):
r"""A module to simulate QAT embedding bag with a linear layer,
this module uses a separate embedding and bagging op, similar
to that which is described in the EmbeddingBag documentation.
https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html
"""
def __init__(self) -> None:
super().__init__()
self.emb = nn.Embedding(num_embeddings=10, embedding_dim=12)
self.emb.qconfig = default_embedding_qat_qconfig
self.bagging_op = torch.sum
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.linear = nn.Linear(12, 1).to(dtype=torch.float)
self.qconfig = get_default_qat_qconfig("qnnpack")
def forward(self, input: torch.Tensor) -> torch.Tensor:
x = self.bagging_op(self.emb(input), dim=1)
x = self.quant(x)
x = self.linear(x)
return self.dequant(x)
class SubModelForFusion(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
self.bn = nn.BatchNorm2d(2).to(dtype=torch.float)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class SubModelWithoutFusion(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
self.relu = nn.ReLU(inplace=False).to(dtype=torch.float)
def forward(self, x):
return self.relu(self.conv(x))
class ModelForFusion(nn.Module):
def __init__(self, qconfig):
super().__init__()
self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float)
self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)
self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)
self.sub1 = SubModelForFusion()
self.sub2 = SubModelWithoutFusion()
self.fc = nn.Linear(36, 10).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.qconfig = qconfig
self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float)
self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float)
self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float)
self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float)
self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float)
self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float)
self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float)
# don't quantize sub2
self.sub2.qconfig = None
self.fc.qconfig = None
def forward(self, x):
x = x.squeeze(2)
x = self.quant(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu4(x)
x = x.unsqueeze(2)
y = x.unsqueeze(2)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.sub1(x)
x = self.dequant(x)
x = self.sub2(x)
x = x.reshape(-1, 36).contiguous()
x = self.fc(x)
y = self.conv2(y)
y = self.relu2(y)
y = self.bn2(y)
y = self.relu3(y)
y = self.dequant(y)
return x
class ConvBNReLU(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(3, 3, 1, 1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=False)
)
class ModelWithSequentialFusion(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 1)
self.relu1 = nn.ReLU(inplace=False)
layers = []
for i in range(3):
layers.append(ConvBNReLU())
self.features = nn.Sequential(*layers)
head = [nn.Linear(300, 10), nn.ReLU(inplace=False)]
self.classifier = nn.Sequential(*head)
self.seq = nn.Sequential()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.relu1(x)
x = self.features(x)
x = torch.reshape(x, (-1, 3 * 10 * 10))
x = self.classifier(x)
x = self.seq(x)
x = self.dequant(x)
return x
class ModelForFusionWithBias(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 2, 5, bias=True).to(dtype=torch.float)
self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)
self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)
self.conv2 = nn.Conv2d(2, 2, 1, bias=True).to(dtype=torch.float)
self.bn2 = nn.BatchNorm2d(2).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.dequant(x)
return x
class ModelForLinearBNFusion(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(20, 10)
self.bn = nn.BatchNorm1d(10)
nn.init.uniform_(self.bn.weight)
nn.init.uniform_(self.bn.bias)
def forward(self, x):
return self.bn(self.fc(x))
class DummyObserver(torch.nn.Module):
def calculate_qparams(self):
return 1.0, 0
def forward(self, x):
return x
class ModelForConvTransposeBNFusion(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.ConvTranspose1d(3, 3, 1)
self.bn1 = nn.BatchNorm1d(3)
self.conv2 = nn.ConvTranspose2d(3, 3, 1)
self.bn2 = nn.BatchNorm2d(3)
self.conv3 = nn.ConvTranspose3d(3, 3, 1)
self.bn3 = nn.BatchNorm3d(3)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = x.unsqueeze(2)
x = self.conv2(x)
x = self.bn2(x)
x = x.unsqueeze(2)
x = self.conv3(x)
x = self.bn3(x)
return x
class ModelWithFunctionals(torch.nn.Module):
def __init__(self):
super().__init__()
self.mycat = nnq.FloatFunctional()
self.myadd = nnq.FloatFunctional()
self.myadd_relu = nnq.FloatFunctional()
        # Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
# self.my_scalar_add = nnq.FloatFunctional()
# self.my_scalar_mul = nnq.FloatFunctional()
def forward(self, x):
y = self.mycat.cat([x, x, x])
z = self.myadd.add(y, y)
w = self.myadd_relu.add_relu(z, z)
        # Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
# w = self.my_scalar_add.add_scalar(w, -0.5)
# w = self.my_scalar_mul.mul_scalar(w, 0.5)
return w
class ResNetBase(torch.nn.Module):
def __init__(self):
super().__init__()
norm_layer = nn.BatchNorm2d
inplanes = 3
self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.bn1 = norm_layer(inplanes)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.downsample = torch.nn.Identity()
self.myop = nn.quantized.FloatFunctional()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = torch.nn.Linear(inplanes, 1)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
identity = self.downsample(x)
out = self.myop.add(out, identity)
out = self.relu2(out)
out = self.avgpool(out)
out = torch.flatten(out, 1)
out = self.fc(out)
return out
def fuse_model(self):
        # TODO: remove this check and define two fuse_model functions on this module
if self.training:
torch.ao.quantization.fuse_modules_qat(self, [['conv1', 'bn1', 'relu1']], inplace=True)
else:
torch.ao.quantization.fuse_modules(self, [['conv1', 'bn1', 'relu1']], inplace=True)
class ModelMultipleOps(torch.nn.Module):
def __init__(self):
super().__init__()
norm_layer = nn.BatchNorm2d
inplanes = 3
self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.bn1 = norm_layer(inplanes)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.downsample = torch.nn.Identity()
self.skip_add = nn.quantized.FloatFunctional()
self.cat = nn.quantized.FloatFunctional()
self.avgpool = nn.AdaptiveAvgPool2d((4, 4))
self.fc = nn.Linear(12, 6)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
identity = self.downsample(x)
out = self.skip_add.add(out, identity)
out = self.relu2(out)
out = self.avgpool(out)
out = self.conv2(out)
out = torch.nn.functional.max_pool2d(out, 2, 2)
out = self.cat.cat([out, out])
out = out.reshape(-1, 3 * 2 * 2)
out = self.fc(out)
return out
# Model to ensure consistency of fake quant with true quant
# Average pooling and mean operations are not modelled
# accurately with fake-quant so this model does not
# contain those operations
class ModelMultipleOpsNoAvgPool(torch.nn.Module):
def __init__(self):
super().__init__()
norm_layer = nn.BatchNorm2d
inplanes = 3
self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.bn1 = norm_layer(inplanes)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.skip_add = nn.quantized.FloatFunctional()
self.cat = nn.quantized.FloatFunctional()
self.maxpool = nn.MaxPool2d((4, 4))
self.fc = nn.Linear(12, 6)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
skip = self.conv2(x)
out = self.skip_add.add(out, skip)
out = self.relu2(out)
out = self.maxpool(out)
out = self.conv2(out)
out = torch.nn.functional.max_pool2d(out, 2, 2)
out = self.cat.cat([out, out])
out = out.reshape(-1, 3 * 2 * 2)
out = self.fc(out)
return out
class EmbeddingBagModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12,
include_last_offset=True, scale_grad_by_freq=False, mode='sum')
def forward(self, indices, offsets, per_sample_weights):
return self.emb(indices, offsets, per_sample_weights)
class EmbeddingModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
def forward(self, indices):
return self.emb(indices)
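# Illustrative sketch (hypothetical helper): embeddings are quantized
# weight-only; attaching float_qparams_weight_only_qconfig and running the
# eager prepare/convert flow is the usual way to swap nn.Embedding for its
# quantized counterpart.
def _example_embedding_weight_only_quantization():
    model = EmbeddingModule().eval()
    model.qconfig = float_qparams_weight_only_qconfig
    prepared = torch.quantization.prepare(model)
    prepared(torch.randint(0, 10, (5,)))             # forward pass with sample indices
    return torch.quantization.convert(prepared)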
class EmbeddingWithStaticLinear(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12)
self.fc = torch.nn.Linear(4, 2)
self.emb.qconfig = float_qparams_weight_only_qconfig
self.qconfig = default_qconfig
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, indices, offsets, linear_in):
emb = self.emb(indices, offsets)
q_x = self.quant(linear_in)
fc = self.fc(q_x)
fc = self.dequant(fc)
features = torch.cat([fc] + [emb], dim=1)
return features
class DenseTopMLP(nn.Module):
def __init__(self, dense_dim, dense_out, embedding_dim, top_out_in, top_out_out) -> None:
        super().__init__()
self.dense_mlp = nn.Sequential(
nn.Linear(dense_dim, dense_out),
)
self.top_mlp = nn.Sequential(
nn.Linear(dense_out + embedding_dim, top_out_in),
nn.Linear(top_out_in, top_out_out),
)
def forward(
self,
sparse_feature: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
dense_feature = self.dense_mlp(dense)
features = torch.cat([dense_feature] + [sparse_feature], dim=1)
out = self.top_mlp(features)
return out
# Thin wrapper around nn.EmbeddingBag, because tracing inside nn.EmbeddingBag
# is not supported at the moment and this needs to be a top-level module
class EmbBagWrapper(nn.Module):
def __init__(self, num_embeddings, embedding_dim):
super().__init__()
self.emb_bag = nn.EmbeddingBag(num_embeddings, embedding_dim, mode='sum')
def forward(self, indices, offsets):
return self.emb_bag(indices, offsets)
class SparseNNModel(nn.Module):
_NUM_EMBEDDINGS = 10
_EMBEDDING_DIM = 5
_DENSE_DIM = 4
_DENSE_OUTPUT = 2
_TOP_OUT_IN = 2
_TOP_OUT_OUT = 2
_TOP_MLP_DIM = 1
def __init__(self) -> None:
        super().__init__()
self.model_sparse = EmbBagWrapper(self._NUM_EMBEDDINGS, self._EMBEDDING_DIM)
self.dense_top = DenseTopMLP(
self._DENSE_DIM, self._DENSE_OUTPUT, self._EMBEDDING_DIM, self._TOP_OUT_IN,
self._TOP_OUT_OUT)
def forward(
self,
sparse_indices: torch.Tensor,
sparse_offsets: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
sparse_feature = self.model_sparse(sparse_indices, sparse_offsets)
out = self.dense_top(sparse_feature, dense)
return out
| pytorch-master | torch/testing/_internal/common_quantization.py |
import torch
import unittest
from copy import deepcopy
from enum import Enum
from functools import wraps, partial
from itertools import chain, product
import itertools
import torch.nn.functional as F
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import TEST_CUDNN
from torch.testing._internal.common_dtype import floating_types
from torch.testing._internal.common_device_type import (
_TestParametrizer, _update_param_kwargs, toleranceOverride, tol,
skipCUDAIfCudnnVersionLessThan, skipCUDAIfRocm, precisionOverride, skipMeta)
from torch.testing._internal.common_methods_invocations import DecorateInfo
from torch.testing._internal.common_nn import nllloss_reference, get_reduction
from torch.testing._internal.common_utils import (
freeze_rng_state, set_single_threaded_if_parallel_tbb, skipIfMps, GRADCHECK_NONDET_TOL, TEST_WITH_ROCM)
from types import ModuleType
from typing import List, Tuple, Type, Set, Dict
# List of all namespaces containing modules to test.
MODULE_NAMESPACES: List[ModuleType] = [
torch.nn.modules,
torch.nn.qat.modules,
torch.nn.quantizable.modules,
torch.nn.quantized.modules,
]
# Modules that shouldn't be tested for one reason or another.
MODULES_TO_SKIP: Set[Type] = {
torch.nn.Module, # abstract base class
torch.nn.Container, # deprecated
torch.nn.NLLLoss2d, # deprecated
torch.nn.quantized.MaxPool2d, # aliases to nn.MaxPool2d
}
# List of all module classes to test.
MODULE_CLASSES: List[Type] = list(chain(*[
[getattr(namespace, module_name) for module_name in namespace.__all__] # type: ignore[attr-defined]
for namespace in MODULE_NAMESPACES]))
MODULE_CLASSES = [cls for cls in MODULE_CLASSES if cls not in MODULES_TO_SKIP]
# Dict of module class -> common name. Useful for making test names more intuitive.
# Example: torch.nn.modules.linear.Linear -> "nn.Linear"
MODULE_CLASS_NAMES: Dict[Type, str] = {}
for namespace in MODULE_NAMESPACES:
for module_name in namespace.__all__: # type: ignore[attr-defined]
module_cls = getattr(namespace, module_name)
namespace_name = namespace.__name__.replace('torch.', '').replace('.modules', '')
MODULE_CLASS_NAMES[module_cls] = f'{namespace_name}.{module_name}'
# Specifies the modes (i.e. train, eval) to test over.
TrainEvalMode = Enum('TrainEvalMode', ('train_only', 'eval_only', 'train_and_eval'))
class modules(_TestParametrizer):
""" PROTOTYPE: Decorator for specifying a list of modules over which to run a test. """
def __init__(self, module_info_list, allowed_dtypes=None, train_eval_mode=TrainEvalMode.train_and_eval):
self.module_info_list = module_info_list
self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None
self.train_eval_mode = train_eval_mode
def _get_training_flags(self, module_info):
training_flags = []
if (self.train_eval_mode == TrainEvalMode.train_only or
self.train_eval_mode == TrainEvalMode.train_and_eval):
training_flags.append(True)
if (self.train_eval_mode == TrainEvalMode.eval_only or
self.train_eval_mode == TrainEvalMode.train_and_eval):
training_flags.append(False)
# If train and eval modes don't differ for the module, don't bother using more than one.
if not module_info.train_and_eval_differ:
training_flags = training_flags[:1]
return training_flags
def _parametrize_test(self, test, generic_cls, device_cls):
if device_cls is None:
raise RuntimeError('The @modules decorator is only intended to be used in a device-specific '
'context; use it with instantiate_device_type_tests() instead of '
'instantiate_parametrized_tests()')
for module_info in self.module_info_list:
dtypes = set(module_info.dtypes)
if self.allowed_dtypes is not None:
dtypes = dtypes.intersection(self.allowed_dtypes)
training_flags = self._get_training_flags(module_info)
for (training, dtype) in product(training_flags, dtypes):
# Construct the test name; device / dtype parts are handled outside.
# See [Note: device and dtype suffix placement]
test_name = module_info.formatted_name
if len(training_flags) > 1:
test_name += f"_{'train_mode' if training else 'eval_mode'}"
# Construct parameter kwargs to pass to the test.
param_kwargs = {'module_info': module_info}
_update_param_kwargs(param_kwargs, 'dtype', dtype)
_update_param_kwargs(param_kwargs, 'training', training)
try:
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
for decorator in module_info.get_decorators(generic_cls.__name__, test.__name__,
device_cls.device_type, dtype):
test_wrapper = decorator(test_wrapper)
yield (test_wrapper, test_name, param_kwargs)
except Exception as ex:
# Provides an error message for debugging before rethrowing the exception
print("Failed to instantiate {0} for module {1}!".format(test_name, module_info.name))
raise ex
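# Illustrative usage sketch (hypothetical test; `module_db` stands for the list
# of ModuleInfo entries that test files typically import from this module):
#
#   class TestModule(TestCase):
#       @modules(module_db, allowed_dtypes=[torch.float32])
#       def test_forward(self, device, dtype, module_info, training):
#           ...
#
#   instantiate_device_type_tests(TestModule, globals())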
def get_module_fully_qualified_name(module_cls):
""" Returns the common name of the module class formatted for use in test names. """
return MODULE_CLASS_NAMES[module_cls]
class FunctionInput(object):
""" Contains args and kwargs to pass as input to a function. """
__slots__ = ['args', 'kwargs']
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class ModuleInput(object):
""" Contains args / kwargs for module instantiation + forward pass. """
__slots__ = ['constructor_input', 'forward_input', 'desc', 'reference_fn']
def __init__(self, constructor_input, forward_input=None, desc='', reference_fn=None):
self.constructor_input = constructor_input # Inputs to pass during construction
self.forward_input = forward_input # Inputs to pass to forward()
self.desc = desc # Description for this set of inputs
self.reference_fn = reference_fn # Reference with signature: reference_fn(module, parameters, *args, **kwargs)
if reference_fn is not None:
@wraps(reference_fn)
def copy_reference_fn(m, *args, **kwargs):
# Copy inputs to avoid undesired side effects from calling the reference.
args, kwargs = deepcopy(args), deepcopy(kwargs)
# Note that module parameters are passed in for convenience.
return reference_fn(m, list(m.parameters()), *args, **kwargs)
self.reference_fn = copy_reference_fn
class ModuleInfo(object):
""" Module information to be used in testing. """
def __init__(self,
module_cls, # Class object for the module under test
*,
module_inputs_func, # Function to generate module inputs
skips=(), # Indicates which tests to skip
decorators=None, # Additional decorators to apply to generated tests
dtypes=floating_types(), # dtypes this function is expected to work with
supports_gradgrad=True, # whether the op supports second order gradients
gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck
module_memformat_affects_out=False, # whether converting module to channels last will generate
# channels last output
train_and_eval_differ=False, # whether the module has differing behavior between train and eval
):
self.module_cls = module_cls
self.module_inputs_func = module_inputs_func
self.decorators = (*(decorators if decorators else []), *(skips if skips else []))
self.dtypes = dtypes
self.supports_gradgrad = supports_gradgrad
self.gradcheck_nondet_tol = gradcheck_nondet_tol
self.module_memformat_affects_out = module_memformat_affects_out
self.train_and_eval_differ = train_and_eval_differ
def get_decorators(self, test_class, test_name, device, dtype):
result = [set_single_threaded_if_parallel_tbb]
for decorator in self.decorators:
if isinstance(decorator, DecorateInfo):
if decorator.is_active(test_class, test_name, device, dtype):
result.extend(decorator.decorators)
else:
result.append(decorator)
return result
@property
def name(self):
return get_module_fully_qualified_name(self.module_cls)
@property
def formatted_name(self):
return self.name.replace('.', '_')
def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
module_inputs = [
ModuleInput(constructor_input=FunctionInput(10, 8),
forward_input=FunctionInput(input=make_input((4, 10))),
reference_fn=lambda m, p, input: torch.mm(input, p[0].t()) + p[1].view(1, -1).expand(4, 8)),
ModuleInput(constructor_input=FunctionInput(10, 8, bias=False),
forward_input=FunctionInput(make_input((4, 10))),
desc='no_bias',
reference_fn=lambda m, p, i: torch.mm(i, p[0].t())),
ModuleInput(constructor_input=FunctionInput(3, 5),
forward_input=FunctionInput(make_input(3)),
desc='no_batch_dim',
reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1])
]
return module_inputs
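# Illustrative sketch (hypothetical entry; the real database of ModuleInfo
# objects lives further down in this file): tying the ModuleInfo class above to
# an inputs function such as module_inputs_torch_nn_Linear.
_example_linear_module_info = ModuleInfo(
    torch.nn.Linear,
    module_inputs_func=module_inputs_torch_nn_Linear,
    dtypes=floating_types(),
)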
def module_inputs_torch_nn_Bilinear(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def bilinear_reference_fn(m, p, x1, x2, bias=True):
result = torch.einsum('bn,anm,bm->ba', x1, p[0], x2)
if bias:
if x1.shape[0] == 1:
result = result.view(-1) + p[1]
else:
result = result + p[1].view(1, -1).expand(x1.shape[0], p[0].shape[0])
return result
module_inputs = [
ModuleInput(constructor_input=FunctionInput(2, 3, 4),
forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))),
reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1, x2)),
ModuleInput(constructor_input=FunctionInput(2, 3, 4, bias=False),
forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))),
desc='no_bias',
reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1, x2, bias=False)),
ModuleInput(constructor_input=FunctionInput(2, 3, 4),
forward_input=FunctionInput(make_input((2)), make_input((3))),
desc='no_batch_dim',
reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1.view(1, -1), x2.view(1, -1))),
]
return module_inputs
def module_inputs_torch_nn_NLLLoss(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
cases: List[Tuple[str, dict]] = [
('', {}),
('reduction_sum', {'reduction': 'sum'}),
('reduction_none', {'reduction': 'none'}),
('ignore_index', {'ignore_index': 2}),
('weights', {'weight': make_weight(10).abs()}),
('weights_ignore_index', {'weight': make_weight(10).abs(), 'ignore_index': 2}),
('weights_ignore_index_neg', {'weight': make_weight(10).abs(), 'ignore_index': -1})
]
# TODO: Uncomment when negative weights is supported.
# negative_weight = make_weight(10)
# negative_weight[0] = -1
# cases.append(('weights_negative', {'weight': negative_weight}))
module_inputs = []
for desc, constructor_kwargs in cases:
def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
return nllloss_reference(i, t, **constructor_kwargs)
module_inputs.append(
ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
forward_input=FunctionInput(make_input((15, 10)).log_softmax(dim=1),
torch.empty(15, device=device).uniform_().mul(10).floor().long()),
desc=desc,
reference_fn=reference_fn)
)
return module_inputs
def module_inputs_torch_nn_GaussianNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
cases: List[Tuple[str, dict]] = [
('', {}),
('reduction_sum', {'reduction': 'sum'}),
('reduction_mean', {'reduction': 'mean'}),
('reduction_none', {'reduction': 'none'}),
]
module_inputs = []
for desc, constructor_kwargs in cases:
module_inputs.append(
ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
forward_input=FunctionInput(make_input((3)),
make_target((3)),
make_input((1)).abs()),
desc=desc,
reference_fn=no_batch_dim_reference_fn)
)
return module_inputs
def no_batch_dim_reference_fn(m, p, *args, **kwargs):
"""Reference function for modules supporting no batch dimensions.
Unbatched inputs are unsqueezed to form a
single batch input before passing them to the module.
The output is squeezed to compare with the
output of unbatched input to the module.
Currently it only supports modules which return a single Tensor as output.
You can bind the following kwargs.
Kwargs:
        batch_first[bool] : If True, all the Tensors in `args` will be unsqueezed at dim `0`
            and the output will be squeezed at dim `0`; otherwise dim `1` is used for both.
        kwargs_to_batchify[dict] : Dictionary specifying the name of the argument and the dimension to unsqueeze.
            Useful if there are a few arguments whose batch dimensions differ
            from the ones selected by `batch_first`.
is_criterion[bool] : Specify if the module is a criterion and handle the reduction for output accordingly.
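
    Example (mirrors how this function is wired up via functools.partial
    elsewhere in this file)::

        reference_fn = partial(no_batch_dim_reference_fn, is_criterion=True)
        # later invoked as: reference_fn(module, list(module.parameters()), input, target)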
"""
def get_and_pop(key, default):
v = kwargs.get(key, default)
if key in kwargs:
kwargs.pop(key)
return v
batch_dim = 0 if get_and_pop('batch_first', True) else 1
kwargs_to_batchify = get_and_pop('kwargs_to_batchify', None)
is_criterion = get_and_pop('is_criterion', False)
if kwargs_to_batchify is not None:
assert isinstance(kwargs_to_batchify, dict)
for k, v in kwargs.items():
if k in kwargs_to_batchify and v is not None:
bdim = kwargs_to_batchify[k]
kwargs[k] = v.unsqueeze(bdim)
single_batch_input_args = [input.unsqueeze(batch_dim) for input in args]
with freeze_rng_state():
output = m(*single_batch_input_args, **kwargs).squeeze(batch_dim)
if is_criterion:
reduction = get_reduction(m)
if reduction == 'none':
return output.squeeze(0)
return output
def no_batch_dim_reference_mha(m, p, *args, **kwargs):
"""Reference function for MultiheadAttention supporting no batch dimensions.
Unbatched inputs are unsqueezed to form a
single batch input before passing them to the module.
The output is squeezed to compare with the
output of unbatched input to the module.
"""
batch_dim = 0 if kwargs.get('batch_first', True) else 1
if 'batch_first' in kwargs:
kwargs.pop('batch_first')
if 'key_padding_mask' in kwargs and kwargs['key_padding_mask'] is not None:
kwargs['key_padding_mask'] = kwargs['key_padding_mask'].unsqueeze(0)
single_batch_input_args = [input.unsqueeze(batch_dim) for input in args]
with freeze_rng_state():
output = m(*single_batch_input_args, **kwargs)
return (output[0].squeeze(batch_dim), output[1].squeeze(0))
def no_batch_dim_reference_rnn_gru(m, p, *args, **kwargs):
"""Reference function for RNN and GRU supporting no batch dimensions.
Unbatched inputs are unsqueezed to form a
single batch input before passing them to the module.
The output is squeezed to compare with the
output of unbatched input to the module.
"""
if len(args) == 1:
inp, = args
h = None
elif len(args) == 2:
inp, h = args
h = h.unsqueeze(1)
batch_dim = 0 if kwargs['batch_first'] else 1
kwargs.pop('batch_first')
inp = inp.unsqueeze(batch_dim)
single_batch_input_args = (inp, h)
with freeze_rng_state():
output = m(*single_batch_input_args, **kwargs)
return (output[0].squeeze(batch_dim), output[1].squeeze(1))
def no_batch_dim_reference_lstm(m, p, *args, **kwargs):
"""Reference function for LSTM supporting no batch dimensions.
Unbatched inputs are unsqueezed to form a
single batch input before passing them to the module.
The output is squeezed to compare with the
output of unbatched input to the module.
"""
if len(args) == 1:
inp, = args
h = None
elif len(args) == 2:
inp, h = args
h = (h[0].unsqueeze(1), h[1].unsqueeze(1))
batch_dim = 0 if kwargs['batch_first'] else 1
kwargs.pop('batch_first')
inp = inp.unsqueeze(batch_dim)
single_batch_input_args = (inp, h)
with freeze_rng_state():
output = m(*single_batch_input_args, **kwargs)
return (output[0].squeeze(batch_dim), (output[1][0].squeeze(1), output[1][1].squeeze(1)))
def no_batch_dim_reference_lstmcell(m, p, *args, **kwargs):
"""Reference function for LSTMCell supporting no batch dimensions.
    The module is passed the input and hidden states in batched form with a single item.
    The output is squeezed to compare with the output of the unbatched input.
"""
inp, (h, c) = args
single_batch_input_args = (inp.unsqueeze(0), (h.unsqueeze(0), c.unsqueeze(0)))
with freeze_rng_state():
output = m(*single_batch_input_args, **kwargs)
return (output[0].squeeze(0), output[1].squeeze(0))
def generate_regression_criterion_inputs(make_input):
return [
ModuleInput(
constructor_input=FunctionInput(reduction=reduction),
forward_input=FunctionInput(make_input((4, )), make_input(4,)),
reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True),
desc='no_batch_dim_{}'.format(reduction)
) for reduction in ['none', 'mean', 'sum']]
def module_inputs_torch_nn_AvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(constructor_input=FunctionInput(kernel_size=2),
forward_input=FunctionInput(make_input((3, 6))),
desc='no_batch_dim',
reference_fn=no_batch_dim_reference_fn)]
def module_inputs_torch_nn_AdaptiveAvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(constructor_input=FunctionInput(3,),
forward_input=FunctionInput(make_input((1, 3, 5, 6))),
desc='single')]
def module_inputs_torch_nn_BatchNorm2d(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(constructor_input=FunctionInput(3,),
forward_input=FunctionInput(make_input((2, 3, 6, 6))))]
def module_inputs_torch_nn_BatchNorm3d(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(constructor_input=FunctionInput(3,),
forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))))]
def module_inputs_torch_nn_ConvNd(module_info, device, dtype, requires_grad, training, **kwargs):
N = kwargs['N']
lazy = kwargs.get('lazy', False)
transposed = kwargs.get('transposed', False)
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
conv_kwargs_list = [{}] if transposed else [{}, {'padding': 'same'}]
kernel_size, C_in, C_out = 3, 4, 5
input_no_batch_shape = (C_in,) + tuple((i + 3 for i in range(N)))
input_batch_shape = (2,) + input_no_batch_shape
return [
ModuleInput(constructor_input=(FunctionInput(C_out, kernel_size, **conv_kwargs) if lazy else
FunctionInput(C_in, C_out, kernel_size, **conv_kwargs)),
forward_input=FunctionInput(make_input(
input_batch_shape if with_batch else input_no_batch_shape)),
desc=('' if with_batch else 'no_batch_dim'),
reference_fn=(None if with_batch else no_batch_dim_reference_fn))
for with_batch, conv_kwargs in itertools.product([True, False], conv_kwargs_list)
]
def module_inputs_torch_nn_ELU(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(constructor_input=FunctionInput(alpha=2.),
forward_input=FunctionInput(make_input((3, 2, 5))),
reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2 * (i.exp() - 1))),
ModuleInput(constructor_input=FunctionInput(alpha=2.),
forward_input=FunctionInput(make_input(())),
desc='scalar'),
ModuleInput(constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input((3,))),
desc='no_batch_dim',
reference_fn=no_batch_dim_reference_fn),
ModuleInput(constructor_input=FunctionInput(alpha=2.),
forward_input=FunctionInput(make_input((2, 3, 2, 5))),
desc='4d_input')]
def module_inputs_torch_nn_CELU(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(constructor_input=FunctionInput(alpha=2.),
forward_input=FunctionInput(make_input((3, 2, 5))),
reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. * ((.5 * i).exp() - 1))),
ModuleInput(constructor_input=FunctionInput(alpha=2.),
forward_input=FunctionInput(make_input(())),
reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2 * (i.exp() - 1)),
desc='scalar'),
ModuleInput(constructor_input=FunctionInput(alpha=2.),
forward_input=FunctionInput(make_input((3,))),
desc='no_batch_dim',
reference_fn=no_batch_dim_reference_fn)]
def module_inputs_torch_nn_ReLU(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input(4)),
desc='no_batch_dim'),
ModuleInput(constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input((2, 3, 4, 5))),
desc='channels_last_mem_format'),
ModuleInput(constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))),
desc='channels_last_3d_mem_format')]
def module_inputs_torch_nn_L1Loss(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input((2, 3, 4)),
make_input((2, 3, 4))),
reference_fn=lambda m, p, i, t: 1. / i.numel() * sum((a - b).abs().sum()
for a, b in zip(i, t))),
ModuleInput(constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input(()), make_input(())),
reference_fn=lambda m, p, i, t: 1. / i.numel() * (i - t).abs().sum(),
desc='scalar')] + generate_regression_criterion_inputs(make_input)
def module_inputs_torch_nn_CrossEntropyLoss(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False)
make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
reductions = ['sum', 'mean', 'none']
samples = []
# Samples below are for validating the no-batch-dim support.
for reduction in reductions:
samples.append(
ModuleInput(constructor_input=FunctionInput(reduction=reduction),
forward_input=FunctionInput(make_input((9,)), make_target((), low=0, high=9)),
reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True))
)
samples.append(
ModuleInput(constructor_input=FunctionInput(reduction=reduction, weight=make_weight((9,))),
forward_input=FunctionInput(make_input((9,)), make_target((), low=0, high=9)),
reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True))
)
samples.append(
ModuleInput(constructor_input=FunctionInput(reduction=reduction, label_smoothing=0.5),
forward_input=FunctionInput(make_input((9,)), make_target((), low=0, high=9)),
reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True))
)
samples.append(
ModuleInput(constructor_input=FunctionInput(reduction=reduction, label_smoothing=0.5,
weight=make_weight((9,))),
forward_input=FunctionInput(make_input((9,)), make_target((), low=0, high=9)),
reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True))
)
return samples
def module_inputs_torch_nn_Hardswish(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(
constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input(4)),
reference_fn=no_batch_dim_reference_fn,
desc='no_batch_dim',
),
ModuleInput(
constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input((2, 3, 2, 5))),
desc='4d_input')
]
def module_inputs_torch_nn_MaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(
constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)),
            forward_input=FunctionInput(make_input((3, 7, 7))),
desc='3d_input'),
ModuleInput(
constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)),
forward_input=FunctionInput(make_input((1, 3, 7, 7))),
desc='4d_input'),
ModuleInput(
constructor_input=FunctionInput((3, 3), (2, 2), (1, 1), return_indices=True),
forward_input=FunctionInput(make_input((1, 3, 7, 7))),
desc='return_indices'),
]
def module_inputs_torch_nn_Sigmoid(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
ModuleInput(
constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input((2, 3, 4, 5))),
desc='channels_last_mem_format'
),
ModuleInput(
constructor_input=FunctionInput(),
forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))),
desc='channels_last_3d_mem_format'
)
]
def module_inputs_torch_nn_TransformerEncoderLayer(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
samples = [
ModuleInput(
constructor_input=FunctionInput(4, 2, 16, 0.0),
forward_input=FunctionInput(
make_input((2, 3, 4))
),
desc='relu_activation'
),
ModuleInput(
constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu),
forward_input=FunctionInput(
make_input((2, 3, 4))
),
desc='gelu_activation'
), ]
# Samples below are for validating the no-batch-dim support.
key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool))
attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3)))
for src_mask, src_key_padding_mask, norm_first in itertools.product(attn_masks, key_padding_masks, (True, False)):
samples.append(
ModuleInput(
constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8,
dropout=0.0, batch_first=True, norm_first=norm_first),
forward_input=FunctionInput(
make_input((3, 4)), src_mask=src_mask, src_key_padding_mask=src_key_padding_mask
),
reference_fn=partial(no_batch_dim_reference_fn,
batch_first=True, kwargs_to_batchify={'src_key_padding_mask': 0}),
desc='no_batch_dim_batch_first'
))
samples.append(
ModuleInput(
constructor_input=FunctionInput(4, 2, 8, dropout=0.0, batch_first=False, norm_first=norm_first),
forward_input=FunctionInput(
make_input((3, 4)), src_mask=src_mask, src_key_padding_mask=src_key_padding_mask
),
reference_fn=partial(no_batch_dim_reference_fn,
batch_first=False, kwargs_to_batchify={'src_key_padding_mask': 0}),
desc='no_batch_dim'
))
def fast_path_reference_fn(module, parameters, *args, **kwargs):
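        # The TransformerEncoderLayer fast path is only taken in eval mode; dropout is 0.0 in
        # these samples, so running the same input through the training-mode (slow) path yields
        # a matching reference output for the eval-mode fast path.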
assert not module.training
module = module.train(True)
output = module(*args, **kwargs)
module = module.train(False)
return output
if not training:
for norm_first in (True, False):
samples.append(
ModuleInput(
constructor_input=FunctionInput(4, 2, 8, dropout=0.0, batch_first=True, norm_first=norm_first),
forward_input=FunctionInput(
make_input((2, 3, 4)),
),
reference_fn=fast_path_reference_fn,
desc="fast_path_norm_first" if norm_first else "fast_path"
)
)
return samples
def module_inputs_torch_nn_TransformerDecoderLayer(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
samples = [
ModuleInput(
constructor_input=FunctionInput(4, 2, 16, 0.0),
forward_input=FunctionInput(
make_input((2, 3, 4)), make_input((2, 3, 4))
),
desc='relu_activation'
),
ModuleInput(
constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu),
forward_input=FunctionInput(
make_input((2, 3, 4)), make_input((2, 3, 4))
),
desc='gelu_activation'
), ]
# Samples below are for validating the no-batch-dim support.
key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool))
attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3)))
for tgt_mask, tgt_key_padding_mask, norm_first in itertools.product(attn_masks, key_padding_masks, (True, False)):
# Using same mask for tgt and memory
memory_mask = tgt_mask
memory_key_padding_mask = tgt_key_padding_mask
samples.append(
ModuleInput(
constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8,
dropout=0.0, batch_first=True, norm_first=norm_first),
forward_input=FunctionInput(
make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask
),
reference_fn=partial(no_batch_dim_reference_fn,
batch_first=True,
kwargs_to_batchify={'tgt_key_padding_mask': 0, 'memory_key_padding_mask': 0}),
desc='no_batch_dim_batch_first'
))
samples.append(
ModuleInput(
constructor_input=FunctionInput(4, 2, 8, dropout=0.0, batch_first=False, norm_first=norm_first),
forward_input=FunctionInput(
make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask
),
reference_fn=partial(no_batch_dim_reference_fn,
batch_first=False,
kwargs_to_batchify={'tgt_key_padding_mask': 0, 'memory_key_padding_mask': 0}),
desc='no_batch_dim'
))
return samples
def module_inputs_torch_nn_Transformer(module_info, device, dtype, requires_grad, training, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
samples = []
# Samples below are for validating the no-batch-dim support.
key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool))
attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3)))
for mask, key_padding_mask, norm_first in itertools.product(attn_masks, key_padding_masks, (True, False)):
        # Use the same mask and key padding mask for src and tgt
        src_mask, tgt_mask = (mask,) * 2
        src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask,) * 2
samples.append(
ModuleInput(
constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8,
num_encoder_layers=1, num_decoder_layers=1,
dropout=0.0, batch_first=True, norm_first=norm_first),
forward_input=FunctionInput(
make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, src_mask=src_mask,
tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask
),
reference_fn=partial(no_batch_dim_reference_fn,
batch_first=True,
kwargs_to_batchify={'tgt_key_padding_mask': 0, 'src_key_padding_mask': 0}),
desc='no_batch_dim_batch_first'
))
samples.append(
ModuleInput(
constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8,
num_encoder_layers=1, num_decoder_layers=1,
dropout=0.0, batch_first=False, norm_first=norm_first),
forward_input=FunctionInput(
make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, src_mask=src_mask,
tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask
),
reference_fn=partial(no_batch_dim_reference_fn,
batch_first=False,
kwargs_to_batchify={'tgt_key_padding_mask': 0, 'src_key_padding_mask': 0}),
desc='no_batch_dim'
))
return samples
def module_inputs_torch_nn_Embedding(module_info, device, dtype, requires_grad, training, **kwargs):
make_empty = partial(torch.empty, device=device, dtype=torch.long, requires_grad=False)
return [
ModuleInput(
constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3),
forward_input=FunctionInput(make_empty(2, 3).random_(4))
),
ModuleInput(
constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3),
forward_input=FunctionInput(make_empty(1, 512).random_(4).expand(7, 512)),
desc='discontiguous'
),
]
def module_inputs_torch_nn_MultiheadAttention(module_info, device, dtype, requires_grad, training, **kwargs):
# Currently all samples below are for validating the no-batch-dim support.
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
samples = []
bool_vals = (True, False)
key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool))
attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3, 3)))
products = itertools.product(bool_vals, bool_vals, bool_vals, key_padding_masks, attn_masks)
for bias, add_bias_kv, add_zero_attn, key_padding_mask, attn_mask in products:
samples.append(
ModuleInput(
constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=True,
bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn),
forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)),
key_padding_mask=key_padding_mask, attn_mask=attn_mask),
reference_fn=no_batch_dim_reference_mha,
)
)
samples.append(
ModuleInput(
constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=False,
bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn),
forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)),
key_padding_mask=key_padding_mask, attn_mask=attn_mask),
reference_fn=partial(no_batch_dim_reference_mha, batch_first=False),
)
)
return samples
def module_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs):
# Currently all samples below are for validating the no-batch-dim support.
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
samples = [
ModuleInput(
constructor_input=FunctionInput(5, 10),
forward_input=FunctionInput(make_input(5), make_input(10)),
reference_fn=no_batch_dim_reference_fn,
),
ModuleInput(
constructor_input=FunctionInput(5, 10, bias=True),
forward_input=FunctionInput(make_input(5), make_input(10)),
reference_fn=no_batch_dim_reference_fn,
)
]
is_rnn = kwargs.get('is_rnn', False)
if is_rnn:
# RNN also supports `nonlinearity` argument.
# `tanh` is the default, so we check with `relu`
samples.append(
ModuleInput(
constructor_input=FunctionInput(5, 10, bias=True, nonlinearity='relu'),
forward_input=FunctionInput(make_input(5), make_input(10)),
reference_fn=no_batch_dim_reference_fn,
)
)
return samples
def module_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs):
# Currently all samples below are for validating the no-batch-dim support.
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
samples = (
ModuleInput(
constructor_input=FunctionInput(5, 10),
forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))),
reference_fn=no_batch_dim_reference_lstmcell,
),
ModuleInput(
constructor_input=FunctionInput(5, 10, bias=True),
forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))),
reference_fn=no_batch_dim_reference_lstmcell,
),
)
return samples
def module_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, **kwargs):
# Currently all samples below are for validating the no-batch-dim support.
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
is_rnn = kwargs['is_rnn']
nonlinearity = ('relu', 'tanh')
bias = (False, True)
batch_first = (False, True)
bidirectional = (False, True)
samples = []
if is_rnn:
prod_gen = product(nonlinearity, bias, batch_first, bidirectional)
else:
prod_gen = product(bias, batch_first, bidirectional)
for args in prod_gen:
if is_rnn:
nl, b, b_f, bidir = args
else:
b, b_f, bidir = args
cons_args = {'input_size': 2, 'hidden_size': 2, 'num_layers': 2,
'batch_first': b_f, 'bias': b, 'bidirectional': bidir}
cons_args_hidden = {'input_size': 2, 'hidden_size': 3, 'num_layers': 2,
'batch_first': b_f, 'bias': b, 'bidirectional': bidir}
if is_rnn:
cons_args['nonlinearity'] = nl
cons_args_hidden['nonlinearity'] = nl
samples.append(
ModuleInput(
constructor_input=FunctionInput(**cons_args),
forward_input=FunctionInput(make_input((2, 2))),
reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f),
)
)
samples.append(
ModuleInput(
constructor_input=FunctionInput(**cons_args_hidden),
forward_input=FunctionInput(make_input((3, 2)), make_input((4 if bidir else 2, 3))),
reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f),
)
)
return samples
def module_inputs_torch_nn_LSTM(module_info, device, dtype, requires_grad, training, **kwargs):
# Currently all samples below are for validating the no-batch-dim support.
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
bias = (False, True)
batch_first = (False, True)
bidirectional = (False, True)
proj_sizes = (0, 2)
samples = []
prod_gen = product(bias, batch_first, bidirectional, proj_sizes)
for args in prod_gen:
b, b_f, bidir, proj_size = args
hidden_size = 3
cons_args = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size,
'batch_first': b_f, 'bias': b, 'bidirectional': bidir}
cons_args_hidden = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size,
'batch_first': b_f, 'bias': b, 'bidirectional': bidir}
samples.append(
ModuleInput(
constructor_input=FunctionInput(**cons_args),
forward_input=FunctionInput(make_input((2, 2))),
reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f),
)
)
h_out = proj_size if proj_size > 0 else hidden_size
hx = (make_input((4 if bidir else 2, h_out)), make_input((4 if bidir else 2, hidden_size)))
samples.append(
ModuleInput(
constructor_input=FunctionInput(**cons_args_hidden),
forward_input=FunctionInput(make_input((3, 2)), hx),
reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f),
)
)
return samples
# All these operators share similar issues on cuDNN and MIOpen
rnn_gru_lstm_module_info_decorators = (
# RuntimeError: Batching rule not implemented for aten::_cudnn_rnn_backward.
# We could not generate a fallback
DecorateInfo(
unittest.expectedFailure, "TestModule", "test_grad",
active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda'
),
# NotImplementedError: the derivative for '_cudnn_rnn_backward' is not implemented.
# Double backwards is not supported for CuDNN RNNs due to limitations in the CuDNN API
DecorateInfo(
unittest.expectedFailure, "TestModule", "test_gradgrad",
active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda'
),
# CUDNN GRU doesn't accept non-contiguous hx
DecorateInfo(
unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors",
active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda'
),
# MIOPEN GRU doesn't accept non-contiguous hx (this is dispatched to miopen only for float).
DecorateInfo(
unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors",
active_if=(TEST_CUDNN and TEST_WITH_ROCM), dtypes=(torch.float,), device_type='cuda'
),
)
# Database of ModuleInfo entries in alphabetical order.
module_db: List[ModuleInfo] = [
ModuleInfo(torch.nn.AdaptiveAvgPool2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool2d,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.AvgPool1d,
module_inputs_func=module_inputs_torch_nn_AvgPool1d,
skips=(
# No channels_last support for AvgPool1d as it does not take 4D inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.BatchNorm2d,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_BatchNorm2d,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),),
decorators=(
# Failure on ROCM for BatchNorm2d float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),)
),
ModuleInfo(torch.nn.BatchNorm3d,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_BatchNorm3d,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),),
decorators=(
# Failure on ROCM for BatchNorm3d float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),)
),
ModuleInfo(torch.nn.Conv1d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 7603
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64])
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.Conv2d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 7603
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format",
device_type='cuda', dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.Conv3d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 8005
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.ConvTranspose1d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 7603
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.ConvTranspose2d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 7603
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cpu'),
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda',
dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.ConvTranspose3d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 8005
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.ELU,
module_inputs_func=module_inputs_torch_nn_ELU,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.L1Loss,
module_inputs_func=module_inputs_torch_nn_L1Loss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.LazyConv1d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 7603
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConv2d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# channels_last support on cuda requires cudnn >= 7603
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format",
device_type='cuda', dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConv3d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 8005
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConvTranspose1d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 7603
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConvTranspose2d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 7603
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cpu'),
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda',
dtypes=[torch.float64]),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.LazyConvTranspose3d,
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True, transposed=True),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
module_memformat_affects_out=True,
skips=(
# channels_last support on cuda requires cudnn >= 8005
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'),
# Failure on ROCM for float32 issue #70125
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
# Lazy modules don't currently play well with ModuleInfo tests on the meta device.
# See https://github.com/pytorch/pytorch/issues/70505 for more info.
DecorateInfo(skipMeta),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# This was wrongly being skipped before and needs investigation.
# See https://github.com/pytorch/pytorch/issues/80247
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),
),
decorators=(
DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
)),
ModuleInfo(torch.nn.Linear,
module_inputs_func=module_inputs_torch_nn_Linear,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# No channels_last support for Linear currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.Bilinear,
module_inputs_func=module_inputs_torch_nn_Bilinear,
decorators=[
DecorateInfo(
toleranceOverride({
torch.float32: tol(atol=1e-4, rtol=1e-4),
torch.float64: tol(atol=1e-4, rtol=1e-4)}),
'TestModule', 'test_forward', device_type='cpu')
],
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
# No channels_last support for Bilinear currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)
),
ModuleInfo(torch.nn.MaxPool2d,
module_inputs_func=module_inputs_torch_nn_MaxPool2d,
skips=(
# TODO: test_non_contiguous_tensors doesn't handle case where output is not a singleton (such as
# return_indices=True for MaxPool2D), submit fix
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_non_contiguous_tensors'),
# TODO: test_cpu_gpu_parity doesn't handle case where output is not a singleton, submit fix
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.NLLLoss,
module_inputs_func=module_inputs_torch_nn_NLLLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.GaussianNLLLoss,
module_inputs_func=module_inputs_torch_nn_GaussianNLLLoss,
skips=(
# No channels_last support for loss functions.
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)),
ModuleInfo(torch.nn.CrossEntropyLoss,
module_inputs_func=module_inputs_torch_nn_CrossEntropyLoss,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.Hardswish,
module_inputs_func=module_inputs_torch_nn_Hardswish,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),),
supports_gradgrad=False),
ModuleInfo(torch.nn.TransformerEncoderLayer,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_TransformerEncoderLayer,
skips=(
# No channels_last support for TransformerEncoderLayer currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.TransformerDecoderLayer,
module_inputs_func=module_inputs_torch_nn_TransformerDecoderLayer,
skips=(
# No channels_last support for TransformerDecoderLayer currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.Transformer,
module_inputs_func=module_inputs_torch_nn_Transformer,
skips=(
# No channels_last support for Transformer currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.MultiheadAttention,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_MultiheadAttention,
skips=(
# No channels_last support for MultiheadAttention currently.
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.Embedding,
module_inputs_func=module_inputs_torch_nn_Embedding,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.ReLU,
module_inputs_func=module_inputs_torch_nn_ReLU,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.RNNCell,
module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU_Cell, is_rnn=True),
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.GRUCell,
module_inputs_func=module_inputs_torch_nn_RNN_GRU_Cell,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.LSTMCell,
module_inputs_func=module_inputs_torch_nn_LSTMCell,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.Sigmoid,
module_inputs_func=module_inputs_torch_nn_Sigmoid,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),)
),
ModuleInfo(torch.nn.RNN,
train_and_eval_differ=True,
module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=True),
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),),
decorators=rnn_gru_lstm_module_info_decorators
),
ModuleInfo(torch.nn.GRU,
train_and_eval_differ=True,
module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=False),
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),),
decorators=rnn_gru_lstm_module_info_decorators),
ModuleInfo(torch.nn.LSTM,
train_and_eval_differ=True,
module_inputs_func=module_inputs_torch_nn_LSTM,
skips=(
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),),
decorators=rnn_gru_lstm_module_info_decorators)
]
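# Illustrative sketch (hypothetical, not part of the original file): module_db is consumed by
# the test decorators/generators in this file; a direct lookup might look like the helper
# below, assuming ModuleInfo stores the wrapped class as `module_cls`.
def _example_find_module_info(module_cls):
    for info in module_db:
        if info.module_cls is module_cls:
            return info
    return None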
| pytorch-master | torch/testing/_internal/common_modules.py |
# Torch
import torch
import torch.cuda
import torch.jit
import torch.jit._logging
import torch.jit.frontend
import torch.jit.quantized
# Testing utils
from torch.testing._internal.common_dtype import floating_and_complex_types_and
from torch.testing._internal.common_utils import TestCase, \
freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors
from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
# Standard library
from itertools import chain
from typing import List, Union
from torch._C import TensorType
import io
def check_output_types(self, func, ref_outputs, args, kwargs):
graph = getattr(func, 'last_graph', None)
types = [o.type() for o in graph.outputs()]
self.assertTrue(len(types) == 1)
t = types[0]
torch._C._jit_assert_is_instance(ref_outputs, t)
# Test names in this set are only checked for a single derivative
nn_functional_single_grad = frozenset('test_nn_' + name for name in [
'pdist',
'multilabel_margin_loss',
'max_unpool3d',
'multi_margin_loss',
'binary_cross_entropy',
'binary_cross_entropy_size_average',
'ctc_loss',
'grid_sample',
])
def check_against_reference(self, func, reference_func, output_func, args, kwargs=None,
allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False):
"""Verifies a function performs identically to some reference implementation.
Commonly, this is used to verify that a JIT implementation
(output_func) matches the behavior of the eager implementation
(reference_func).
"""
kwargs = kwargs if kwargs else {}
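    # Collapse the output(s) into a single scalar, weighting each output differently so that
    # gradients with respect to every floating/complex output are exercised.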
def allSum(vs):
if isinstance(vs, torch.Tensor):
vs = (vs,)
return sum((i + 1) * v.sum()
for i, v in enumerate(vs)
if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16))
def clone_tensor(t, preserve_requires_grad):
require_grad = preserve_requires_grad and t.requires_grad
return t.detach().clone().requires_grad_(require_grad)
def clone_inputs(preserve_requires_grad: bool):
inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = []
for arg in args:
if isinstance(arg, torch.Tensor):
inputs.append(clone_tensor(arg, preserve_requires_grad))
elif is_iterable_of_tensors(arg):
inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg])
else:
inputs.append(arg)
return inputs
# Returns tensors in args that requires_grad, including tensors in TensorList args
def get_recording_tensors(args):
recording_tensors: List[torch.Tensor] = []
for arg in args:
if isinstance(arg, torch.Tensor) and arg.requires_grad:
recording_tensors.append(arg)
elif is_iterable_of_tensors(arg):
recording_tensors.extend(filter(lambda t: t.requires_grad, arg))
return recording_tensors
# test no gradients case
nograd_inputs = clone_inputs(preserve_requires_grad=False)
outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs)
with enable_profiling_mode_for_profiling_tests():
outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs)
self.assertEqual(outputs, outputs_test)
if check_types:
check_output_types(self, func, outputs_test, nograd_inputs, kwargs)
if no_grad:
# skip grad tests
return
with enable_profiling_mode_for_profiling_tests():
# test single grad case
recording_inputs = clone_inputs(preserve_requires_grad=True)
recording_tensors = get_recording_tensors(recording_inputs)
outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
grads = torch.autograd.grad(allSum(outputs), recording_tensors,
allow_unused=allow_unused)
outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
grads_test = torch.autograd.grad(allSum(outputs_test), recording_tensors,
allow_unused=allow_unused)
self.assertEqual(outputs, outputs_test)
self.assertEqual(grads, grads_test)
# test the grad grad case
if self._testMethodName in nn_functional_single_grad or no_gradgrad:
return
outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
l1 = allSum(outputs)
grads = torch.autograd.grad(l1, recording_tensors, create_graph=True,
allow_unused=allow_unused)
l2 = (allSum(grads) * l1)
grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused)
recording_inputs = clone_inputs(preserve_requires_grad=True)
recording_tensors = get_recording_tensors(recording_inputs)
outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
l1_test = allSum(outputs_test)
grads_test = torch.autograd.grad(
l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused)
l2_test = (allSum(grads_test) * l1_test)
grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused)
self.assertEqual(outputs, outputs_test)
self.assertEqual(grads, grads_test)
for g2, g2_test in zip(grads2, grads2_test):
if g2 is None and g2_test is None:
continue
self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4)
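# Illustrative usage sketch (hypothetical, not part of the original file): comparing a scripted
# function against its eager reference from inside a JitCommonTestCase-style test.
# check_types=False is passed because a plain ScriptFunction does not carry a `last_graph`
# attribute for check_output_types to inspect.
def _example_check_against_reference(test_case):
    def eager_fn(x):
        return x.relu() * 2
    scripted_fn = torch.jit.script(eager_fn)
    inputs = (torch.randn(3, requires_grad=True),)
    check_against_reference(test_case, scripted_fn, eager_fn,
                            lambda outputs: outputs, inputs, check_types=False)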
class JitCommonTestCase(TestCase):
def createFunctionFromGraph(self, trace):
graph = trace if isinstance(trace, torch._C.Graph) else trace.graph()
return torch._C._create_function_from_graph("forward", graph)
def assertExportImport(self, trace, inputs):
m = self.createFunctionFromGraph(trace)
self.assertExportImportModule(m, inputs)
def assertExportImportModule(self, m, inputs):
m_import = self.getExportImportCopy(m)
a = self.runAndSaveRNG(m, inputs)
b = self.runAndSaveRNG(m_import, inputs)
self.assertEqual(a, b, "Results of original model and "
"exported/imported version of model differed")
def runAndSaveRNG(self, func, inputs, kwargs=None):
kwargs = kwargs if kwargs else {}
with freeze_rng_state():
results = func(*inputs, **kwargs)
return results
def getExportImportCopy(self, m, also_test_file=True, map_location=None):
buffer = io.BytesIO()
torch.jit.save(m, buffer)
buffer.seek(0)
imported = torch.jit.load(buffer, map_location=map_location)
if not also_test_file:
return imported
with TemporaryFileName() as fname:
torch.jit.save(imported, fname)
return torch.jit.load(fname, map_location=map_location)
def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph,
fusion_nodes_not_found, non_fusible_nodes_being_fused,
fusion_nodes_found, nodes_in_diff_graph):
err_msg = "\nFailure in testing nodes' autodifferentiation. "
if should_autodiff_node:
err_msg += "One or more nodes were expected to be autodiffed, " \
"but were not found in specified fusible/nonfusible " \
"DifferentiableGraph groups. \nSpecifically:"
# The node is intended to appear in a differentiable graph but doesn't
diff_nodes_missing = []
# The node is intended to appear in a differentiable graph
# outside of a fusion group but instead is in a fusion group
diff_nodes_in_fusion = []
# The node is intended to appear in a fusion group but doesn't
fusion_nodes_missing = []
# The node is intended to appear in a fusion group but instead
# is just in an outer differentiable graph
fusion_nodes_in_diff = []
for node in nodes_not_in_diff_graph:
if node in non_fusible_nodes_being_fused:
diff_nodes_in_fusion.append(node)
else:
diff_nodes_missing.append(node)
for node in fusion_nodes_not_found:
if node in nodes_in_diff_graph:
fusion_nodes_in_diff.append(node)
else:
fusion_nodes_missing.append(node)
if len(diff_nodes_missing) > 0:
err_msg += f"\n {diff_nodes_missing} were not in one of the " \
"DifferentiableGraphs when they were expected to be. " \
"Did you intend for these nodes to be autodiffed? " \
"If not, remove them from the list of nonfusible nodes."
if len(diff_nodes_in_fusion) > 0:
err_msg += f"\n {diff_nodes_in_fusion} were found in one of the FusionGroups " \
"when they were expected to be just in a DifferentiableGraph. If it was " \
"intended for these nodes to be in FusionGroups, reclassify these nodes as " \
"fusible nodes. If these nodes were not intended to be fused, your " \
"autodifferentiation logic might be wrong."
if len(fusion_nodes_missing) > 0:
err_msg += f"\n {fusion_nodes_missing} were not in one of the FusionGroups " \
"of the DifferentiableGraphs when they were expected to be. " \
"They were also not found in an outer DifferentiableGraph. Did you " \
"intend for these nodes to be autodifferentiated? If not, you should " \
"remove these nodes from the test's fusible nodes. Otherwise your " \
"autodifferentiation logic might be wrong."
if len(fusion_nodes_in_diff) > 0:
err_msg += f"\n {fusion_nodes_in_diff} were not in one of the FusionGroups " \
"of the DifferentiableGraphs when they were expected to be, " \
"instead they were found just in an outer DifferentiableGraph. " \
"Did you intend for these nodes to be fused? If not, you should " \
"move these nodes into the test's nonfusible nodes. Otherwise your " \
"autodifferentiation logic might be wrong."
else:
err_msg += "One or more nodes were not expected to be autodiffed " \
"but were found in a DifferentiableGraph or in a FusionGroup " \
"of a DifferentiableGraph. Did you intend for these nodes to be " \
"autodiffed? If so, change this test to expect autodifferentiation. " \
"\nSpecifically:"
if len(fusion_nodes_found) > 0:
err_msg += f"\n {fusion_nodes_found} were not expected to be in " \
"one of the DifferentiableGraphs, but appeared in a FusionGroup " \
"of a DifferentiableGraph. "
if len(nodes_in_diff_graph) > 0:
err_msg += f"\n {nodes_in_diff_graph} were not expected to " \
"be in one of the DifferentiableGraphs but were."
return err_msg
def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes):
diff_nodes = graph.findAllNodes('prim::DifferentiableGraph')
diff_subgraphs = [node.g('Subgraph') for node in diff_nodes]
# Note: currently no tests have fusible_nodes
fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs]))
fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes]
# For any non-fusible node, it must show up in one of the DifferentiableGraphs.
nodes_in_diff_graph = []
nodes_not_in_diff_graph = []
non_fusible_nodes_being_fused = []
for node in nonfusible_nodes:
if any(g.findNode(node) is not None for g in diff_subgraphs):
nodes_in_diff_graph.append(node)
else:
nodes_not_in_diff_graph.append(node)
if any(g.findNode(node) is not None for g in fusion_subgraphs):
non_fusible_nodes_being_fused.append(node)
found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes)
# For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs.
fusion_nodes_found = []
fusion_nodes_not_found = []
for node in fusible_nodes:
if any(g.findNode(node) is not None for g in fusion_subgraphs):
fusion_nodes_found.append(node)
else:
fusion_nodes_not_found.append(node)
found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes)
if should_autodiff_node is not None:
err_msg = self.autoDiffErrorMessage(should_autodiff_node,
nodes_not_in_diff_graph,
fusion_nodes_not_found,
non_fusible_nodes_being_fused,
fusion_nodes_found,
nodes_in_diff_graph)
self.assertEqual(should_autodiff_node,
found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg)
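    # Illustrative sketch (hypothetical, not part of the original file): how assertAutodiffNode is
    # typically driven. `scripted_fn.graph_for(*inputs)` is assumed to return the executor's
    # specialized graph once the function has been run under the profiling executor.
    def _example_assert_autodiff_usage(self, scripted_fn, inputs, nonfusible_nodes):
        graph = scripted_fn.graph_for(*inputs)
        self.assertAutodiffNode(graph, True, nonfusible_nodes, [])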
def checkShapeAnalysis(self, out_sizes: Union[List[int], List[List[int]]],
traced_graph, assert_propagation, constant_prop=True):
        # repropagate input shapes provided by tracing
prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
for enable_test_mode in [True, False]:
            # Here we test both allowing and disallowing the substitution of complete shapes as
            # constants; disallowing constants stress-tests the partial evaluation and substitution pipeline.
torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode)
torch._C._jit_erase_non_input_shape_information(traced_graph)
if constant_prop:
torch._C._jit_pass_constant_propagation(traced_graph)
torch._C._jit_pass_propagate_shapes_on_graph(traced_graph)
            # Compare against a default tensor type carrying only sizes, to avoid checking
            # information out of scope and details the tracer leaves in other parts of the tensor type
output = next(traced_graph.outputs()).type()
def test_type(type, actual_size):
sizes = type.symbolic_sizes()
out_type = TensorType.get().with_sizes(sizes)
actual_type = TensorType.get().with_sizes(actual_size)
# always check actual shape is a subtype of the output
self.assertTrue(actual_type.isSubtypeOf(out_type))
# and then if assertion flag is provided, check shape analysis
# is successful
if assert_propagation:
self.assertEqual(out_type.sizes(), actual_size)
if output.isSubtypeOf(torch._C.TensorType.get()):
test_type(output, out_sizes)
else:
tuple_elements = output.elements()
for i in range(len(tuple_elements)):
test_type(tuple_elements[i], out_sizes[i])
torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled)
| pytorch-master | torch/testing/_internal/common_jit.py |
import torch
from torch.utils._pytree import tree_flatten, tree_map
from torch.fx.operator_schemas import normalize_function
from torch.testing._internal.jit_utils import clone_inputs
from torch.utils._python_dispatch import TorchDispatchMode
from itertools import combinations
from collections import namedtuple
from copy import deepcopy
# Named Tuples used within SchemaCheckMode
Mutation = namedtuple('Mutation', ['op_name', 'arg_name'])
Aliasing = namedtuple('Aliasing', ['op_name', 'arg_name', 'output_number'])
# Simplified naming for C++ classes
SchemaArgument = torch._C._SchemaArgument
SchemaArgType = torch._C._SchemaArgType
SchemaInfo = torch._C._SchemaInfo
# This TorchDispatchMode Subclass is used to verify op schemas
# This TorchDispatchMode subclass currently:
# - Records the called ops
# - Checks for mutations on all inputs
# - Checks for aliasing on all inputs
class SchemaCheckMode(TorchDispatchMode):
def __init__(self):
# Information recorded for testing purposes. For example:
# - incorrect schemas
# - overly conservative schemas
self.ops = []
self.mutated = []
self.aliasing = []
def reset_cache(self):
self.ops.clear()
self.mutated.clear()
self.aliasing.clear()
def display_ops(self):
print(*self.ops, sep=",")
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
def has_mutated(before, after, md):
are_tensors = type(before) == torch.Tensor and type(after) == torch.Tensor
if are_tensors and before.layout != torch.sparse_csr and after.layout != torch.sparse_csr:
return not (
before.size() == after.size() and
torch.allclose(before, after, equal_nan=True) and
md[0] == after.stride() and
md[1] == after.storage()._cdata
)
return False
def has_aliased(lhs, rhs):
try:
return torch._C._overlaps(lhs, rhs)
except Exception as exception:
if str(exception).startswith("Cannot inspect value of type "):
return False
else:
raise exception
def standardize_name(name):
return name if name != "self" else "input"
def unwrap(e):
if isinstance(e, torch.Tensor) and not type(e) == torch.Tensor:
try:
return e.elem
except AttributeError as t:
return e
return e
def parse_metadata(e):
if isinstance(e, torch.Tensor):
if not type(e) == torch.Tensor:
try:
current = e.elem
return (deepcopy(current.stride()), current.storage()._cdata)
except AttributeError as t:
return None
# Sparse CSR tensors do not have strides or storage
elif (e.layout != torch.sparse_csr):
return (deepcopy(e.stride()), e.storage()._cdata)
return None
self.ops.append(func._schema.name)
# Clone and process arguments and outputs
pre_arguments = normalize_function(
func,
args,
kwargs,
normalize_to_only_use_kwargs=True
).kwargs
c_p_args = dict(zip(pre_arguments.keys(), clone_inputs(pre_arguments.values())))
cloned_arguments = {name : tree_map(unwrap, c_p_args.get(name)) for name in c_p_args}
cloned_metadata = {name : tree_map(parse_metadata, tree_flatten(pre_arguments.get(name))[0]) for name in pre_arguments}
out = func(*args, **kwargs)
arguments = {name : tree_map(unwrap, pre_arguments.get(name)) for name in pre_arguments}
tuple_out = out if isinstance(out, tuple) else (out, )
tuple_out = tree_map(unwrap, tuple_out)
schema_info = SchemaInfo(func._schema)
schema_info.add_argument_values(pre_arguments)
# Process arguments with outputs
for i in range(len(func._schema.arguments)):
arg = func._schema.arguments[i]
name = standardize_name(arg.name)
if arguments.get(name) is not None:
before = cloned_arguments.get(name)
md = cloned_metadata.get(name)
after = arguments.get(name)
for j in range(len(tuple_out)):
# aten::_unsafe_view is intended to have incorrect aliasing notation (hence unsafe)
if has_aliased(tuple_out[j], after) and func._schema.name != 'aten::_unsafe_view':
if not schema_info.may_contain_alias(
SchemaArgument(SchemaArgType.output, j),
SchemaArgument(SchemaArgType.input, i)):
raise RuntimeError(f'Argument {name} is not defined to alias output but was aliasing')
else:
self.aliasing.append(Aliasing(func._schema.name, name, f"output_{j}"))
if any(has_mutated(a, b, c) for a, b, c in zip(tree_flatten(before)[0], tree_flatten(after)[0], md)):
if not schema_info.is_mutable(SchemaArgument(SchemaArgType.input, i)):
raise RuntimeError(f"Argument {name} is not defined as mutable but was mutated")
else:
self.mutated.append(Mutation(func._schema.name, name))
# Aliasing between outputs
for i, j in combinations(range(len(func._schema.returns)), 2):
if has_aliased(tuple_out[i], tuple_out[j]):
if not schema_info.may_contain_alias(
SchemaArgument(SchemaArgType.output, i),
SchemaArgument(SchemaArgType.output, j)):
raise RuntimeError(f'Outputs {i} and {j} alias unexpectedly')
return out
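# Illustrative usage sketch (hypothetical, not part of the original file): because SchemaCheckMode
# is a TorchDispatchMode, entering it records every dispatched op and raises if a schema
# under-reports a mutation or an alias. This assumes TorchDispatchMode supports the plain
# context-manager protocol, as recent versions do; older versions entered modes via
# torch.utils._python_dispatch.enable_torch_dispatch_mode instead.
def _example_schema_check_usage():
    mode = SchemaCheckMode()
    with mode:
        x = torch.rand(3, 3)
        x.relu_()
    # mode.ops now contains the dispatched op names (e.g. 'aten::rand', 'aten::relu_');
    # mode.mutated records that relu_ mutated its input.
    return mode.ops, mode.mutated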
| pytorch-master | torch/testing/_internal/schema_check_mode.py |
# Torch
from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
import torch.nn.functional as F
import torch
import torch.cuda
import torch.jit
import torch.jit._logging
import torch.jit.frontend
from torch.testing._internal.common_nn import module_tests, new_module_tests
from torch.testing._internal.common_utils import is_iterable_of_tensors
import collections
from copy import deepcopy
from typing import Any, Dict, List, Union
import math # noqa: F401
# Testing utils
from torch._six import inf
# TODO: include files like this should not set the default dtype
torch.set_default_dtype(torch.double)
L = 20
M = 10
S = 5
def unpack_variables(args):
if isinstance(args, tuple):
return tuple(unpack_variables(elem) for elem in args)
else:
return args
class dont_convert(tuple):
pass
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):
if not isinstance(call_args, tuple):
call_args = (call_args,)
def map_arg(arg):
def maybe_non_contig(tensor):
if not non_contiguous or tensor.numel() < 2:
return tensor.clone()
return noncontiguous_like(tensor)
def conjugate(tensor):
return tensor.conj()
if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
return arg
elif isinstance(arg, tuple) and len(arg) == 0:
var = conjugate(torch.randn((), dtype=dtype, device=device))
var.requires_grad = requires_grad
return var
elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
# double check casting
elif isinstance(arg, non_differentiable):
if isinstance(arg.tensor, torch.Tensor):
if arg.tensor.dtype == torch.float:
return maybe_non_contig(arg.tensor.to(dtype=torch.double, device=device))
if arg.tensor.dtype == torch.cfloat:
return conjugate(maybe_non_contig(arg.tensor.to(dtype=torch.cdouble, device=device)))
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
elif isinstance(arg, torch.Tensor):
if arg.dtype == torch.float:
arg = arg.double()
if arg.dtype == torch.cfloat:
arg = arg.to(torch.cdouble)
if arg.is_complex() != dtype.is_complex:
raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ",
"which is not supported for now")
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
return v
elif callable(arg):
return map_arg(arg(dtype=dtype, device=device))
else:
return arg
args_out = tuple(map_arg(arg) for arg in call_args)
kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
return args_out, kwargs_out
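# Illustrative sketch (hypothetical, not part of the original file): create_input maps shape
# tuples to freshly created double-precision tensors with requires_grad set, clones concrete
# tensors, and passes non-tensor arguments through unchanged.
def _example_create_input():
    (mat, vec, scale), _ = create_input(((S, S), torch.ones(S), 2.0), requires_grad=True)
    # mat: new (S, S) double tensor with requires_grad=True; vec: detached double clone of the
    # provided tensor with requires_grad re-enabled; scale: passed through as-is.
    return mat, vec, scale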
# NB: JIT script tests for all nn functional interfaces; script mode does
# not support in-place operations yet, so no in-place operation tests are added.
# All deprecated functions have been removed.
#
# (
# method name,
# input size/constructing fn,
# args (tuple represents shape of a tensor arg),
# test variant name(will be used at test name suffix,
# 'inplace' skips grad tests), // optional
# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional
# fn to determine if test should be skipped, // optional
# fn mapping output to part that should be gradcheck'ed, // optional
# kwargs for function, // optional
# )
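# For example, the entry
#   ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices'))
# below reads as: method name 'max_pool2d', a self tensor of size (S, S, S, S),
# positional args (2, 1), no variant-name suffix, and autodiff info expecting the
# nonfusible node 'aten::max_pool2d_with_indices'.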
nn_functional_tests = [
('conv1d', (S, S, S), ((S, S, S),)),
('conv2d', (S, S, S, S), ((S, S, S, S),)),
('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)),
('conv_transpose1d', (S, S, S), ((S, S, S),)),
('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)),
('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)),
('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)),
('avg_pool1d', (S, S, S), (3,)),
('avg_pool2d', (S, S, S, S), (3,), '', (True,)),
('avg_pool3d', (S, S, S, S, S), (3,)),
('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)),
('max_pool1d', (S, S, S), (2, 1)),
('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'),
('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),
('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
('max_pool3d', (S, S, S, S, S), (2, 1)),
('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
('lp_pool1d', (S, S, S), (2., 3, 2,)),
('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
('adaptive_max_pool1d', (S, S, S), (5,)),
('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),
('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),
('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)),
('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)),
('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)),
('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')),
('alpha_dropout', (S, S, S), (0.5,)),
('dropout2d', (S, S, S), (0.5,)),
('dropout2d', (S, S, S, S), (0.5,), 'batched'),
('dropout3d', (S, S, S, S), (0.5,)),
('dropout3d', (S, S, S, S, S), (0.5,), 'batched'),
('feature_alpha_dropout', (S, S, S), (0.5,)),
('threshold', (S, S, S), (0.1, 2.), '', (True,)),
('threshold', (S, S, S), (0.1, 2., True), 'inplace'),
('relu', (S, S, S), (), '', (True,)),
('relu', (S, S, S), (), 'inplace'),
('glu', (S - 1, S - 1, S - 1), (),),
('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)),
('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'),
('relu6', (S, S, S), (), '', (True,)),
('relu6', (S, S, S), (True), 'inplace'),
('elu', (S, S, S), (0.9,),),
('elu', (S, S, S), (0.9, True), 'inplace'),
('selu', (S, S, S), (),),
('selu', (S, S, S), (True), 'inplace'),
('celu', (S, S, S), (0.9,),),
('celu', (S, S, S), (0.9, True), 'inplace'),
('leaky_relu', (S, S, S), (0.02,), '', (True,)),
('leaky_relu', (S, S, S), (0.02,), 'inplace'),
('rrelu', (S, S), (0.1, 0.3, False),),
('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'),
('hardshrink', (S, S, S), (0.4,), '', (True,)),
('tanhshrink', (S, S, S), (),),
('softsign', (S, S, S), (),),
('softplus', (S, S, S), (), '', (True,)),
('softmin', (S, S, S), (0,),),
('softmax', (S, S, S), (0,), '', (True,)),
('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),
('tanh', (S, S, S), (), '', (True,)),
('sigmoid', (S, S, S), (), '', (True,)),
('silu', (S, S, S), (), '', (True,)),
('log_softmax', (S, S, S), (0,), '', (True,)),
('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),
('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),
('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),
('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),
('batch_norm', (S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ),
'training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (0, S, S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
'size_zero', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (0, S, S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
'size_zero_inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S),
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
None, non_differentiable(torch.ones(S)), True, ),
'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), None, True, ),
'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
None, None, False, ),
'inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ),
'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
None, non_differentiable(torch.ones(S)), False, ),
'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')),
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
non_differentiable(torch.randn(S)), None, False, ),
'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')),
('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),
('layer_norm', (S, S, S, S), ([5],), '',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),
non_differentiable(torch.rand(S))), 'with_weight_and_bias',
(False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])),
('group_norm', (S, S, S), (1, torch.rand(5),),),
('local_response_norm', (S, S, S), (2, ),),
('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',),
('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),),
('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'),
('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),),
('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),),
('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),),
('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
('margin_ranking_loss', (S,), ((S,), (S,)),),
('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),),
('pixel_shuffle', (1, 9, 4, 4), (3,),),
('pixel_unshuffle', (1, 1, 12, 12), (3,),),
('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),),
('pad', (3, 3, 4, 2), ([1, 1],),),
('pairwise_distance', (S, S), ((S, S),),),
('pdist', (S, S), (),),
('cosine_similarity', (S, S), ((S, S),),),
('triplet_margin_loss', (S, S), ((S, S), (S, S)),),
('normalize', (S, S, S), (),),
('unfold', (S, S, S, S), ([2, 3]),),
('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),),
('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),),
('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),),
('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)),
1, 1., non_differentiable(torch.randn(S))),),
('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)),
non_differentiable(torch.randn(3, 2))),),
('binary_cross_entropy', torch.randn(3, 2).sigmoid(),
(non_differentiable(torch.rand(3, 2)),
non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'),
('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(),
(torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long),
torch.randint(1, S, (S,), dtype=torch.long))),
('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'),
('upsample', torch.randn(S, S, M, M), (4,), 'with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'),
('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'),
('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'),
('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'),
('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'),
('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'),
('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'),
('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'),
('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False),
'nearest_4d_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False),
'nearest_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False),
'bilinear_4d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False),
'bilinear_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False),
'bicubic_4d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False),
'bicubic_4d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False),
'nearest_3d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False),
'nearest_3d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False),
'linear_3d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False),
'linear_3d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False),
'nearest_5d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False),
'nearest_5d_with_size_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False),
'trilinear_5d_with_scale_not_recompute_scale_factor'),
('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False),
'trilinear_5d_with_size_not_recompute_scale_factor'),
]
script_template = '''
def the_method({}):
return {}
'''
def value_to_literal(value):
if isinstance(value, str):
# Quotes string and escapes special characters
return ascii(value)
if isinstance(value, torch.Tensor):
return 'torch.' + str(value)
else:
return str(value)
def get_call(method_name, func_type, args, kwargs):
kwargs_str = ', '.join([k + '=' + value_to_literal(v) for k, v in kwargs.items()])
self_arg = args[0]
if(func_type == 'method'):
args = args[1:]
argument_str = ', '.join(args)
argument_str += ', ' if len(args) and len(kwargs) else ''
argument_str += kwargs_str
if func_type == 'functional' or func_type == 'function':
call = 'torch.{}({})'.format(method_name, argument_str)
elif func_type == 'method':
call = '{}.{}({})'.format(self_arg, method_name, argument_str)
elif func_type == 'nn_functional':
call = 'torch.nn.functional.{}({})'.format(method_name, argument_str)
else:
raise TypeError('Unsupported function type')
return call
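# Illustrative examples (hypothetical inputs) of the call strings built above:
#   get_call('relu', 'nn_functional', ['i0'], {})         -> "torch.nn.functional.relu(i0)"
#   get_call('add', 'method', ['i0', 'i1'], {'alpha': 2}) -> "i0.add(i1, alpha=2)"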
def get_constant(x):
if x == inf:
return 'math.inf'
if x == -inf:
return '-math.inf'
return x
def get_script_args(args):
formals: List[str] = []
tensors: List[Union[torch.Tensor, List[torch.Tensor]]] = []
actuals: List[str] = []
for arg in args:
if isinstance(arg, torch.Tensor):
name = 'i{}'.format(len(formals))
formals.append(name)
actuals.append(name)
tensors.append(arg)
elif is_iterable_of_tensors(arg):
name = 'i{}'.format(len(formals))
formals.append(name + ': List[torch.Tensor]')
actuals.append(name)
tensors.append(list(arg))
elif isinstance(arg, str):
actuals.append("'{}'".format(arg))
else:
actuals.append(str(get_constant(arg)))
return (formals, tensors, actuals)
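# Illustrative sketch (hypothetical call) of how get_script_args splits arguments:
#   formals, tensors, actuals = get_script_args((torch.randn(2), 3.5, 'mean'))
#   formals -> ['i0']                      # only tensor args become formal parameters
#   tensors -> [<the 2-element tensor>]    # passed positionally when the script is invoked
#   actuals -> ['i0', '3.5', "'mean'"]     # non-tensors are inlined as literals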
# creates a script function from (name, func_type, output_process_fn)
# and returns the compiled function and example inputs
def gen_script_fn_and_args(method_name, func_type, *args, **kwargs):
formals, tensors, actuals = get_script_args(args)
call = get_call(method_name, func_type, actuals, kwargs)
script = script_template.format(', '.join(formals), call)
CU = torch.jit.CompilationUnit(script)
return CU.the_method, tensors
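# Illustrative sketch (hypothetical call): gen_script_fn_and_args('relu', 'nn_functional', torch.randn(3))
# compiles a CompilationUnit from a script of the form
# "def the_method(i0): return torch.nn.functional.relu(i0)" and returns
# (the compiled method, [the example tensor]).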
# creates a script function from (name, func_type) and
# returns a function that takes in (args, kwargs) and runs the compiled function
def create_script_fn(self, method_name, func_type):
# function returns tuple containing original output and
# filtered output to be used in checking gradients
def script_fn(*args, **kwargs):
fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)
self.assertExportImport(fn.graph, tensors)
output = fn(*tensors)
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
script_fn.last_graph = fn.graph_for(*tensors) # type: ignore[attr-defined]
return output
return script_fn
class SplitInputs():
all_tensors: List[Any]
tensor_args: List[Any]
nontensor_args: List[Any]
arg_types: List[str]
tensor_kwargs: Dict[str, Any]
kwarg_order: List[str]
nontensor_kwargs: Dict[str, Any]
kwarg_types: Dict[str, Any]
@staticmethod
def _is_tensor_input(arg):
return isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg)
def __init__(self, args, kwargs):
self.arg_types = ['t' if self._is_tensor_input(arg) else 's' for arg in args]
self.kwarg_types = {k: 't' if self._is_tensor_input(v) else 's' for k, v in kwargs.items()}
self.tensor_args = [arg for arg in args if self._is_tensor_input(arg)]
self.nontensor_args = [arg for arg in args if not self._is_tensor_input(arg)]
self.tensor_kwargs = {k: v for k, v in kwargs.items() if self._is_tensor_input(v)}
self.nontensor_kwargs = {k: v for k, v in kwargs.items() if not self._is_tensor_input(v)}
self.all_tensors = [*self.tensor_args, *[v for k, v in self.tensor_kwargs.items()]]
self.kwarg_order = [k for k, v in kwargs.items()]
def nontensors_match(self, other: 'SplitInputs'):
if self.arg_types != other.arg_types:
return False
if self.kwarg_types != other.kwarg_types:
return False
if self.kwarg_order != other.kwarg_order:
return False
if self.nontensor_args != other.nontensor_args:
return False
if self.nontensor_kwargs != other.nontensor_kwargs:
return False
return True
# make a new function where all non-tensor arguments in 'args' have been partially
# applied, and all tensor arguments remain.
# used to trace functions when some arguments are not tensors
def partial_apply_nontensors(fn, args, kwargs):
inputs = SplitInputs(args, kwargs)
def new_fn(*tensors_):
tensors = iter(tensors_)
full_args = [args[i] if s == 's' else next(tensors) for i, s in enumerate(inputs.arg_types)]
full_kwargs = {k: kwargs[k] if s == 's' else next(tensors) for k, s in inputs.kwarg_types.items()}
return fn(*full_args, **full_kwargs)
return new_fn, inputs
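# Illustrative sketch (hypothetical call): for fn=torch.add, args=(t1, 2), kwargs={},
# partial_apply_nontensors returns a new_fn expecting only the tensor inputs, so
# new_fn(t1) == torch.add(t1, 2) and inputs.all_tensors == [t1]; the scalar 2 is
# baked in so that torch.jit.trace only ever sees tensor arguments.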
# create a trace function from input fn
def create_traced_fn(self, fn, cache_traced_fn=False):
def traced_fn(*inputs, **kwargs):
# `check_trace` is set to False because check_trace is run with @no_grad
# Also, `check_against_reference` already does all the checks
# against python function
fn_tensors, split_inputs = partial_apply_nontensors(fn, inputs, kwargs)
if not cache_traced_fn or not hasattr(traced_fn, 'traced'):
traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False)
self.assertExportImport(traced.graph, split_inputs.all_tensors)
output = traced(*split_inputs.all_tensors)
if cache_traced_fn:
traced_fn.traced = traced
traced_fn.split_inputs = split_inputs
else:
# Guard to check that nontensor inputs are the same as during tracing
self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs))
output = traced_fn.traced(*split_inputs.all_tensors)
traced = traced_fn.traced
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors) # type: ignore[attr-defined]
traced_fn.graph = traced.graph # type: ignore[attr-defined]
return output
return traced_fn
# known to be failing in script
EXCLUDE_SCRIPT = {
'test_norm_fro_default',
'test_norm_fro_cpu',
'test_norm_nuc',
'test_norm_fro',
'test_norm_nuc_batched',
# aten op has additional cudnn argument
'test_nn_unfold',
# flaky test - TODO fix
'test_nn_ctc_loss',
# unknown builtin op
'test_nn_fold',
# jit doesn't support sparse tensors.
'test_to_sparse',
'test_to_sparse_dim',
}
# generates a script function and set of example inputs
# from a specified test in the format of nn_functional_tests
def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args):
test_name = 'test_nn_' + name
if variant_name != '':
test_name = test_name + '_' + variant_name
no_grad = variant_name == 'inplace'
self_variable = create_input((self_size,))[0][0]
kwargs = None
# need to record this because methods can change the size (e.g. unsqueeze)
args_variable, kwargs_variable = create_input(args)
self_tensor = deepcopy(self_variable.data)
args_tensor = deepcopy(unpack_variables(args_variable))
f_args_variable = (self_variable,) + args_variable
f_args_tensor = (self_tensor,) + args_tensor
with torch._jit_internal._disable_emit_hooks():
script_fn, inputs = gen_script_fn_and_args(name, "nn_functional", *f_args_variable)
return script_fn, inputs
# additional modules test
# TODO: delete this list once we make all nn_tests work
additional_module_tests = [
{
'module_name': 'Bilinear',
'constructor_args': (S, S, M),
'input_size': (S, S),
'extra_args': ((S, S),)
},
{
'module_name': 'RNNCell',
'constructor_args': (S, S),
'input_size': (S, S),
},
{
'module_name': 'LSTMCell',
'constructor_args': (S, S),
'input_size': (S, S),
},
{
'module_name': 'GRUCell',
'constructor_args': (S, S),
'input_size': (S, S),
},
{
'module_name': 'MultiheadAttention',
'constructor_args': (128, 8),
'input_size': (10, 8, 128),
'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)),
'slowTest': True
},
{
'module_name': 'Transformer',
'constructor_args': (1, 1, 1, 1, 2),
'input_size': (3, 1, 1),
'extra_args': (torch.randn(1, 1, 1),),
'slowTest': True
}
]
EXCLUDE_SCRIPT_MODULES = {
'test_nn_AdaptiveAvgPool2d_tuple_none',
'test_nn_AdaptiveAvgPool3d_tuple_none',
'test_nn_AdaptiveMaxPool2d_tuple_none',
'test_nn_AdaptiveMaxPool3d_tuple_none',
# Doesn't use future division, so this is not supported
'test_nn_CrossMapLRN2d',
}
script_method_template = '''
def forward({}):
return {}
'''
def create_script_module(self, nn_module, constructor_args, *args, **kwargs):
def script_module(*args, **kwargs):
formals, tensors, actuals = get_script_args(args)
method_args = ', '.join(['self'] + actuals)
call_args_str = ', '.join(actuals)
call = "self.submodule({})".format(call_args_str)
script = script_method_template.format(method_args, call)
submodule_constants = []
if kwargs.get('is_constant'):
submodule_constants = ['submodule']
# Create module to use the script method
class TheModule(torch.jit.ScriptModule):
__constants__ = submodule_constants
def __init__(self):
super(TheModule, self).__init__()
self.submodule = nn_module(*constructor_args)
def make_module(script):
module = TheModule()
# check __repr__
str(module)
module.define(script)
return module
module = make_module(script)
if self:
self.assertExportImportModule(module, tensors)
module(*args)
# skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
create_script_module.last_graph = module.graph # type: ignore[attr-defined]
return module
return script_module
def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'):
formals, tensors, actuals = get_script_args(args)
call = get_call(method_name, func_type, actuals, kwargs)
script = script_template.format(', '.join(formals), call)
CU = torch.jit.CompilationUnit(script)
# to clean up IR
torch._C._jit_pass_inline(CU.the_method.graph)
torch._C._jit_pass_constant_propagation(CU.the_method.graph)
torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name)
def get_nn_module_name_from_kwargs(**kwargs):
if 'module_name' in kwargs:
return kwargs['module_name']
elif 'fullname' in kwargs:
return kwargs['fullname']
elif 'constructor' in kwargs:
return kwargs['constructor'].__name__
def get_nn_mod_test_name(**kwargs):
if 'fullname' in kwargs:
test_name = kwargs['fullname']
else:
test_name = get_nn_module_name_from_kwargs(**kwargs)
if 'desc' in kwargs:
test_name = "{}_{}".format(test_name, kwargs['desc'])
return 'test_nn_{}'.format(test_name)
def get_nn_module_class_from_kwargs(**kwargs):
name = get_nn_module_name_from_kwargs(**kwargs)
index = name.find("_")
if index == -1:
return name
else:
return name[:index]
def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):
name = get_nn_module_name_from_kwargs(**kwargs)
if 'desc' in kwargs and 'eval' in kwargs['desc']:
# eval() is not supported, so skip these tests
return
test_name = name
if 'desc' in kwargs:
test_name = "{}_{}".format(test_name, kwargs['desc'])
test_name = get_nn_mod_test_name(**kwargs)
if test_name in EXCLUDE_SCRIPT_MODULES:
return
if 'constructor' in kwargs:
nn_module = kwargs['constructor']
else:
nn_module = getattr(torch.nn, name)
if "FunctionalModule" in str(nn_module):
return
if 'constructor_args_fn' in kwargs:
constructor_args = kwargs['constructor_args_fn']()
else:
constructor_args = kwargs.get('constructor_args', ())
# Set up inputs from tuple of sizes or constructor fn
input_dtype = torch.double
if 'input_fn' in kwargs:
input = kwargs['input_fn']()
if isinstance(input, torch.Tensor):
input = (input,)
if all(tensor.is_complex() for tensor in input):
input_dtype = torch.cdouble
else:
input = (kwargs['input_size'],)
# Extra parameters to forward()
if 'extra_args' in kwargs:
input = input + kwargs['extra_args']
if 'target_size' in kwargs:
input = input + (kwargs['target_size'],)
elif 'target_fn' in kwargs:
if torch.is_tensor(input):
input = (input,)
input = input + (kwargs['target_fn'](),)
args_variable, kwargs_variable = create_input(input, dtype=input_dtype)
f_args_variable = deepcopy(unpack_variables(args_variable))
out_var = deepcopy(f_args_variable)
args, mod = f_args_variable, create_script_module(None, nn_module, constructor_args, *f_args_variable)(*f_args_variable)
return mod, out_var
def get_all_nn_module_tests():
return module_tests + new_module_tests + additional_module_tests
| pytorch-master | torch/testing/_internal/jit_metaprogramming_utils.py |
import os
import re
import sys
from typing import List
__all__ = [
"check_code_for_cuda_kernel_launches",
"check_cuda_kernel_launches",
]
# FILES TO EXCLUDE (match is done with suffix using `endswith`)
# You wouldn't drive without a seatbelt, though, so why would you
# launch a kernel without some safety? Use this as a quick workaround
# for a problem with the checker, fix the checker, then de-exclude
# the files in question.
exclude_files: List[str] = []
# Without using a C++ AST we can't 100% detect kernel launches, so we
# model them as having the pattern "<<<parameters>>>(arguments);"
# We then require that `C10_CUDA_KERNEL_LAUNCH_CHECK` be
# the next statement.
#
# We model the next statement as ending at the next `}` or `;`.
# If we see `}` then a clause ended (bad); if we see a semicolon then
# we expect the launch check just before it.
#
# Since the kernel launch can include lambda statements, it's important
# to find the correct end-paren of the kernel launch. Doing this with
# pure regex requires recursive regex, which aren't part of the Python
# standard library. To avoid an additional dependency, we build a prefix
# regex that finds the start of a kernel launch, use a paren-matching
# algorithm to find the end of the launch, and then another regex to
# determine if a launch check is present.
# Finds potential starts of kernel launches
kernel_launch_start = re.compile(
r"^.*<<<[^>]+>>>\s*\(", flags=re.MULTILINE
)
# This pattern should start at the character after the final paren of the
# kernel launch. It returns a match if the launch check is not the next statement
has_check = re.compile(
r"\s*;(?![^;}]*C10_CUDA_KERNEL_LAUNCH_CHECK\(\);)", flags=re.MULTILINE
)
def find_matching_paren(s: str, startpos: int) -> int:
"""Given a string "prefix (unknown number of characters) suffix"
and the position of the first `(`, returns the index of the character
one past the matching `)`, accounting for paren nesting.
"""
opening = 0
for i, c in enumerate(s[startpos:]):
if c == '(':
opening += 1
elif c == ')':
opening -= 1
if opening == 0:
return startpos + i + 1
raise IndexError("Closing parens not found!")
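# A worked sketch (toy input, not from the codebase) of how the pieces above combine:
#   src = "kernel<<<grid, block>>>(a, b); C10_CUDA_KERNEL_LAUNCH_CHECK();"
#   m = kernel_launch_start.search(src)          # matches up to and including '('
#   end = find_matching_paren(src, m.end() - 1)  # index one past the matching ')'
#   has_check.match(src, end)                    # None here because the check follows;
#                                                # a Match object would mean a missing check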
def should_exclude_file(filename) -> bool:
for exclude_suffix in exclude_files:
if filename.endswith(exclude_suffix):
return True
return False
def check_code_for_cuda_kernel_launches(code, filename=None):
"""Checks code for CUDA kernel launches without cuda error checks.
Args:
code - The code to check
filename - Filename of the file containing the code. Used only for display
purposes, so you can put anything here.
Returns:
The number of unsafe kernel launches in the code
"""
if filename is None:
filename = "##Python Function Call##"
# We break the code apart and put it back together to add
# helpful line numberings for identifying problem areas
code = enumerate(code.split("\n")) # Split by line breaks
code = [f"{lineno}: {linecode}" for lineno, linecode in code] # Number the lines
code = '\n'.join(code) # Put it back together
num_launches_without_checks = 0
for m in kernel_launch_start.finditer(code):
end_paren = find_matching_paren(code, m.end() - 1)
if has_check.match(code, end_paren):
num_launches_without_checks += 1
context = code[m.start():end_paren + 1]
print(f"Missing C10_CUDA_KERNEL_LAUNCH_CHECK in '{filename}'. Context:\n{context}", file=sys.stderr)
return num_launches_without_checks
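# Illustrative usage (toy snippets): a launch immediately followed by
# C10_CUDA_KERNEL_LAUNCH_CHECK() counts as safe, anything else is reported to stderr:
#   check_code_for_cuda_kernel_launches(
#       "f<<<1, 1>>>(x); C10_CUDA_KERNEL_LAUNCH_CHECK();", "ok.cu")   # returns 0
#   check_code_for_cuda_kernel_launches(
#       "f<<<1, 1>>>(x); return;", "bad.cu")                          # returns 1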
def check_file(filename):
"""Checks a file for CUDA kernel launches without cuda error checks
Args:
filename - File to check
Returns:
The number of unsafe kernel launches in the file
"""
if not (filename.endswith(".cu") or filename.endswith(".cuh")):
return 0
if should_exclude_file(filename):
return 0
with open(filename, "r") as fo:
contents = fo.read()
unsafeCount = check_code_for_cuda_kernel_launches(contents, filename)
return unsafeCount
def check_cuda_kernel_launches():
"""Checks all pytorch code for CUDA kernel launches without cuda error checks
Returns:
The number of unsafe kernel launches in the codebase
"""
torch_dir = os.path.dirname(os.path.realpath(__file__))
torch_dir = os.path.dirname(torch_dir) # Go up to parent torch
torch_dir = os.path.dirname(torch_dir) # Go up to parent caffe2
kernels_without_checks = 0
files_without_checks = []
for root, dirnames, filenames in os.walk(torch_dir):
# `$BASE/build` and `$BASE/torch/include` are generated
# so we don't want to flag their contents
if root == os.path.join(torch_dir, "build") or root == os.path.join(torch_dir, "torch/include"):
# Curtail search by modifying dirnames and filenames in place
# Yes, this is the way to do this, see `help(os.walk)`
dirnames[:] = []
continue
for x in filenames:
filename = os.path.join(root, x)
file_result = check_file(filename)
if file_result > 0:
kernels_without_checks += file_result
files_without_checks.append(filename)
if kernels_without_checks > 0:
count_str = f"Found {kernels_without_checks} instances in " \
f"{len(files_without_checks)} files where kernel " \
"launches didn't have checks."
print(count_str, file=sys.stderr)
print("Files without checks:", file=sys.stderr)
for x in files_without_checks:
print(f"\t{x}", file=sys.stderr)
print(count_str, file=sys.stderr)
return kernels_without_checks
if __name__ == "__main__":
unsafe_launches = check_cuda_kernel_launches()
sys.exit(0)
| pytorch-master | torch/testing/_internal/check_kernel_launches.py |
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import math
from functools import partial
import inspect
import io
import copy
import operator
import argparse
import unittest
import warnings
import random
import contextlib
import shutil
import threading
from pathlib import Path
import socket
import subprocess
import time
from collections.abc import Sequence, Mapping
from contextlib import contextmanager, closing
from functools import wraps
from itertools import product
from copy import deepcopy
import tempfile
import json
import __main__ # type: ignore[import]
import errno
import ctypes
from typing import Any, Dict, Iterable, Iterator, Optional, Union, List, Tuple, Type, TypeVar, Callable
from unittest.mock import MagicMock
import numpy as np
import expecttest
from torch.testing._comparison import (
assert_equal as assert_equal,
Pair,
TensorLikePair,
BooleanPair,
NumberPair,
UnsupportedInputs,
NonePair,
ErrorMeta,
)
import torch
import torch.cuda
from torch.testing import make_tensor
from torch._utils_internal import get_writable_path
from torch._six import string_classes
from torch import Tensor
import torch.backends.cudnn
import torch.backends.mkl
import torch.backends.xnnpack
from enum import Enum
from statistics import mean
import functools
from .composite_compliance import no_dispatch
from torch.testing._internal.common_dtype import get_all_dtypes
from torch.nn import ModuleList, ModuleDict, Sequential, ParameterList, ParameterDict
from torch._C import ScriptList, ScriptDict # type: ignore[attr-defined]
from torch.onnx import (register_custom_op_symbolic,
unregister_custom_op_symbolic)
torch.backends.disable_global_flags()
PYTEST_FILES = ["test_ops", "test_ops_gradients", "test_ops_jit"]
FILE_SCHEMA = "file://"
if sys.platform == 'win32':
FILE_SCHEMA = "file:///"
IS_CI = bool(os.getenv('CI'))
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1'
IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1'
RETRY_TEST_CASES = os.getenv('PYTORCH_RETRY_TEST_CASES') == '1'
OVERRIDE_FLAKY_SIGNAL = os.getenv('PYTORCH_OVERRIDE_FLAKY_SIGNAL') == '1'
MAX_NUM_RETRIES = 3
DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json'
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
NATIVE_DEVICES = ('cpu', 'cuda', 'meta')
class _TestParametrizer(object):
"""
Decorator class for parametrizing a test function, yielding a set of new tests spawned
from the original generic test, each specialized for a specific set of test inputs. For
example, parametrizing a test across the set of ops will result in a test function per op.
The decision of how to parametrize / what to parametrize over is intended to be implemented
by each derived class.
In detail, the decorator adds a 'parametrize_fn' property to the test function that is called
during device-specific test instantiation performed in instantiate_device_type_tests(). Because of this,
there is no need to parametrize over device type, as that is already handled separately.
If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new
composite 'parametrize_fn' will be created that generates tests with the product of the parameters
generated by the old and new parametrize_fns. This allows for convenient composability of decorators.
"""
def _parametrize_test(self, test, generic_cls, device_cls):
"""
Parametrizes the given test function across whatever dimension is specified by the derived class.
Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all
ops, all modules, or all ops + their associated dtypes.
Args:
test (fn): Test function to parametrize over
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None
if the tests are not part of a device-specific set
Returns:
Generator object returning 3-tuples of:
test (fn): Parametrized test function; must support a device arg and args for any params
test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to
the base name of the test
param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64})
"""
raise NotImplementedError
def __call__(self, fn):
if hasattr(fn, 'parametrize_fn'):
# Do composition with the product of args.
old_parametrize_fn = fn.parametrize_fn
new_parametrize_fn = self._parametrize_test
fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn)
else:
fn.parametrize_fn = self._parametrize_test
return fn
def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn):
"""
Returns a parametrize_fn that parametrizes over the product of the parameters handled
by the given parametrize_fns. Each given parametrize_fn should each have the signature
f(test, generic_cls, device_cls).
The test names will be a combination of the names produced by the parametrize_fns in
"<new_name>_<old_name>" order. This order is done to match intuition for constructed names
when composing multiple decorators; the names will be built in top to bottom order when stacking
parametrization decorators.
Args:
old_parametrize_fn (callable) - First parametrize_fn to compose.
new_parametrize_fn (callable) - Second parametrize_fn to compose.
"""
def composite_fn(test, generic_cls, device_cls,
old_parametrize_fn=old_parametrize_fn,
new_parametrize_fn=new_parametrize_fn):
old_tests = [(test, test_name, param_kwargs) for (test, test_name, param_kwargs) in
old_parametrize_fn(test, generic_cls, device_cls)]
for (old_test, old_test_name, old_param_kwargs) in old_tests:
for (new_test, new_test_name, new_param_kwargs) in \
new_parametrize_fn(old_test, generic_cls, device_cls):
redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys())
if redundant_params:
raise RuntimeError('Parametrization over the same parameter by multiple parametrization '
'decorators is not supported. For test "{}", the following parameters '
'are handled multiple times: {}'.format(
test.__name__, redundant_params))
full_param_kwargs = {**old_param_kwargs, **new_param_kwargs}
merged_test_name = '{}{}{}'.format(new_test_name,
'_' if old_test_name != '' and new_test_name != '' else '',
old_test_name)
yield (new_test, merged_test_name, full_param_kwargs)
return composite_fn
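# Illustrative naming sketch (hypothetical test): stacking @parametrize("y", ['a', 'b'])
# on top of @parametrize("x", [1, 2]) composes the two parametrize_fns, so a test named
# test_foo expands to test_foo_y_a_x_1, test_foo_y_a_x_2, test_foo_y_b_x_1 and
# test_foo_y_b_x_2 ("<new_name>_<old_name>" order, built top to bottom as described above).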
def instantiate_parametrized_tests(generic_cls):
"""
Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a
decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by
parametrized tests with specialized names.
You can also use it as a class decorator. E.g.
```
@instantiate_parametrized_tests
class TestFoo(TestCase):
...
```
Args:
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
"""
for attr_name in tuple(dir(generic_cls)):
class_attr = getattr(generic_cls, attr_name)
if not hasattr(class_attr, 'parametrize_fn'):
continue
# Remove the generic test from the test class.
delattr(generic_cls, attr_name)
# Add parametrized tests to the test class.
def instantiate_test_helper(cls, name, test, param_kwargs):
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
test(self, **param_kwargs)
assert not hasattr(generic_cls, name), "Redefinition of test {0}".format(name)
setattr(generic_cls, name, instantiated_test)
for (test, test_suffix, param_kwargs) in class_attr.parametrize_fn(
class_attr, generic_cls=generic_cls, device_cls=None):
full_name = '{}_{}'.format(test.__name__, test_suffix)
instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs)
return generic_cls
class subtest(object):
"""
Explicit subtest case for use with test parametrization.
Allows for explicit naming of individual subtest cases as well as applying
decorators to the parametrized test.
Args:
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name (str): Optional name to use for the test.
decorators (iterable): Iterable of decorators to apply to the generated test.
"""
__slots__ = ['arg_values', 'name', 'decorators']
def __init__(self, arg_values, name=None, decorators=None):
self.arg_values = arg_values
self.name = name
self.decorators = decorators if decorators else []
class parametrize(_TestParametrizer):
"""
Decorator for applying generic test parametrizations.
The interface for this decorator is modeled after `@pytest.mark.parametrize`.
Basic usage between this decorator and pytest's is identical. The first argument
should be a string containing comma-separated names of parameters for the test, and
the second argument should be an iterable returning values or tuples of values for
the case of multiple parameters.
Beyond this basic usage, the decorator provides some additional functionality that
pytest does not.
1. Parametrized tests end up as generated test functions on unittest test classes.
Since this differs from how pytest works, this decorator takes on the additional
responsibility of naming these test functions. The default test names consists of
the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"),
but custom names can be defined using `name_fn` or the `subtest` structure (see below).
2. The decorator specially handles parameter values of type `subtest`, which allows for
more fine-grained control over both test naming and test execution. In particular, it can
be used to tag subtests with explicit test names or apply arbitrary decorators (see examples
below).
Examples::
@parametrize("x", range(5))
def test_foo(self, x):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')])
def test_bar(self, x, y):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')],
name_fn=lambda x, y: '{}_{}'.format(x, y))
def test_bar_custom_names(self, x, y):
...
@parametrize("x, y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]),
subtest((1, 4), name='quadruple')])
def test_baz(self, x, y):
...
Args:
arg_str (str): String of arg names separated by commas (e.g. "x,y").
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name_fn (Callable): Optional function that takes in parameters and returns subtest name.
"""
def __init__(self, arg_str, arg_values, name_fn=None):
self.arg_names: List[str] = [s.strip() for s in arg_str.split(',')]
self.arg_values = arg_values
self.name_fn = name_fn
def _formatted_str_repr(self, name, value):
""" Returns a string representation for the given arg that is suitable for use in test function names. """
if isinstance(value, torch.dtype):
return dtype_name(value)
elif isinstance(value, torch.device):
return str(value)
# Can't use isinstance as it would cause a circular import
elif value.__class__.__name__ == 'OpInfo' or value.__class__.__name__ == 'ModuleInfo':
return value.formatted_name
else:
# Include name and value separated by underscore.
return '{}_{}'.format(name, str(value).replace('.', '_'))
def _default_subtest_name(self, values):
return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)])
def _get_subtest_name(self, values, explicit_name=None):
if explicit_name:
subtest_name = explicit_name
elif self.name_fn:
subtest_name = self.name_fn(*values)
else:
subtest_name = self._default_subtest_name(values)
return subtest_name
def _parametrize_test(self, test, generic_cls, device_cls):
if len(self.arg_names) == 0:
# No additional parameters needed for the test.
test_name = ''
yield (test, test_name, {})
else:
# Each "values" item is expected to be either:
# * A tuple of values with one for each arg. For a single arg, a single item is expected.
# * A subtest instance with arg_values matching the previous.
values = check_exhausted_iterator = object()
for values in self.arg_values:
maybe_name = None
if isinstance(values, subtest):
sub = values
values = sub.arg_values
maybe_name = sub.name
# Apply decorators.
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
for decorator in sub.decorators:
test_wrapper = decorator(test_wrapper)
gen_test = test_wrapper
else:
gen_test = test
values = list(values) if len(self.arg_names) > 1 else [values]
if len(values) != len(self.arg_names):
raise RuntimeError('Expected # values == # arg names, but got: {} '
'values and {} names for test "{}"'.format(
len(values), len(self.arg_names), test.__name__))
param_kwargs = {
name: value for name, value in zip(self.arg_names, values)
}
test_name = self._get_subtest_name(values, explicit_name=maybe_name)
if '.' in test_name:
raise RuntimeError('Test name cannot contain periods, but got: {}'.format(test_name))
yield (gen_test, test_name, param_kwargs)
if values is check_exhausted_iterator:
raise ValueError('An empty arg_values was passed to @parametrize. '
'Note that this may result from reuse of a generator.')
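# Illustrative expansion (hypothetical test): with the default naming above,
# @parametrize("x", range(5)) applied to test_foo and instantiated via
# instantiate_parametrized_tests produces test_foo_x_0 ... test_foo_x_4 as
# separate unittest methods on the class.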
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
def cppProfilingFlagsToProfilingMode():
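# The setters below return the previous flag value; setting a flag and immediately
# restoring the returned value is a way to read the current executor flags without
# actually changing them.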
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._get_graph_executor_optimize(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._get_graph_executor_optimize(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._get_graph_executor_optimize(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._get_graph_executor_optimize(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._get_graph_executor_optimize(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._get_graph_executor_optimize(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
# TODO fix when https://github.com/python/mypy/issues/2427 is addressed
torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment]
torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment]
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
is_running_via_run_test = "run_test.py" in getattr(__main__, "__file__", "")
parser = argparse.ArgumentParser(add_help=not is_running_via_run_test)
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--jit_executor', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if IS_CI else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE)
parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE)
# Only run when -h or --help flag is active to display both unittest and parser help messages.
def run_unittest_help(argv):
unittest.main(argv=argv)
if '-h' in sys.argv or '--help' in sys.argv:
help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
help_thread.start()
help_thread.join()
args, remaining = parser.parse_known_args()
if args.jit_executor == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
# CI prefix path, used only in CI environments
CI_TEST_PREFIX = str(Path(os.getcwd()))
CI_PT_ROOT = str(Path(os.getcwd()).parent)
CI_FUNCTORCH_ROOT = str(os.path.join(Path(os.getcwd()).parent, "functorch"))
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa: B001,E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
# The following cool snippet is copied from Py3 core library subprocess.call
# with only the following changes:
# 1. `except KeyboardInterrupt` block added for SIGINT handling.
# 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
#    `p.wait()` in a `finally` block for the code to be portable.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
print(element)
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def _print_test_names():
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]
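# e.g. chunk_list([1, 2, 3, 4, 5], 2) -> [[1, 3, 5], [2, 4]] (round-robin split, so
# chunk sizes differ by at most one)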
# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api
def sanitize_test_filename(filename):
# inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed
if filename.startswith(CI_TEST_PREFIX):
filename = filename[len(CI_TEST_PREFIX) + 1:]
strip_py = re.sub(r'.py$', '', filename)
return re.sub('/', r'.', strip_py)
# hack until https://github.com/pytorch/pytorch/issues/82109 is resolved
def sanitize_if_functorch_test_filename(filename):
# absolute filenames must be converted to relative paths, otherwise,
# we cannot prepend test-reports/ to it
# (e.g. test-reports\\C:\\... on windows is nonsense)
if filename.startswith(CI_FUNCTORCH_ROOT):
filename = filename[len(CI_PT_ROOT) + 1:]
return filename
def lint_test_case_extension(suite):
succeed = True
for test_case_or_suite in suite:
test_case = test_case_or_suite
if isinstance(test_case_or_suite, unittest.TestSuite):
first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None
if first_test is not None and isinstance(first_test, unittest.TestSuite):
return succeed and lint_test_case_extension(test_case_or_suite)
test_case = first_test
if test_case is not None:
test_class = test_case.id().split('.', 1)[1].split('.')[0]
if not isinstance(test_case, TestCase):
err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't."
print(f"{test_class} - failed. {err}")
succeed = False
return succeed
def sanitize_pytest_xml(xml_file: str):
# pytest xml is different from unittest xml; this function makes pytest xml more similar to unittest xml
# consider somehow modifying the XML logger in conftest to do this instead
import xml.etree.ElementTree as ET
tree = ET.parse(xml_file)
for testcase in tree.iter('testcase'):
full_classname = testcase.attrib['classname']
regex_result = re.search(r"^test\.(.*)\.([^\.]*)$", full_classname)
classname = regex_result.group(2)
file = regex_result.group(1).replace('.', "/")
testcase.set('classname', classname)
testcase.set('file', f"{file}.py")
tree.write(xml_file)
def run_tests(argv=UNITTEST_ARGS):
# import test files.
if IMPORT_SLOW_TESTS:
if os.path.exists(IMPORT_SLOW_TESTS):
with open(IMPORT_SLOW_TESTS, 'r') as fp:
# use env vars so pytest-xdist subprocesses can still access them
os.environ['SLOW_TESTS_DICT'] = fp.read()
else:
print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}')
if IMPORT_DISABLED_TESTS:
# It is unsafe to store the list of disabled tests on Windows in a single env
# variable because it has an upper limit of 32767 characters in length. We will
# need to think of a better way to handle this on Windows if the test time there
# is impacted by this
if os.path.exists(IMPORT_DISABLED_TESTS) and not IS_WINDOWS:
with open(IMPORT_DISABLED_TESTS, 'r') as fp:
os.environ['DISABLED_TESTS_DICT'] = fp.read()
else:
print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}'
f' or we are on Windows whose env variable has an upper limit of 32767 chars')
# Determine the test launch mechanism
if TEST_DISCOVER:
_print_test_names()
return
# Before running the tests, lint to check that every test class extends from TestCase
suite = unittest.TestLoader().loadTestsFromModule(__main__)
if not lint_test_case_extension(suite):
sys.exit(1)
if TEST_IN_SUBPROCESS:
failed_tests = []
test_cases = discover_test_cases_recursively(suite)
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
other_args = []
if IMPORT_DISABLED_TESTS:
other_args.append('--import-disabled-tests')
if IMPORT_SLOW_TESTS:
other_args.append('--import-slow-tests')
cmd = [sys.executable] + [argv[0]] + other_args + argv[1:] + [test_case_full_name]
string_cmd = " ".join(cmd)
exitcode = shell(cmd)
if exitcode != 0:
# This is sort of hacky, but add on relevant env variables for distributed tests.
if 'TestDistBackendWithSpawn' in test_case_full_name:
backend = os.environ.get("BACKEND", "")
world_size = os.environ.get("WORLD_SIZE", "")
env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}"
string_cmd = env_prefix + " " + string_cmd
# Log the command to reproduce the failure.
print(f"Test exited with non-zero exitcode {exitcode}. Command to reproduce: {string_cmd}")
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner # type: ignore[import]
from xmlrunner.result import _XMLTestResult # type: ignore[import]
class XMLTestResultVerbose(_XMLTestResult):
"""
Adding verbosity to test outputs:
by default test summary prints 'skip',
but we want to also print the skip reason.
GH issue: https://github.com/pytorch/pytorch/issues/69014
This works with unittest_xml_reporting<=3.2.0,>=2.0.0
(3.2.0 is latest at the moment)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def addSkip(self, test, reason):
super().addSkip(test, reason)
for c in self.callback.__closure__:
if isinstance(c.cell_contents, str) and c.cell_contents == 'skip':
# this message is printed in test summary;
# it stands for `verbose_str` captured in the closure
c.cell_contents = f"skip: {reason}"
test_filename = inspect.getfile(sys._getframe(1))
test_filename = sanitize_if_functorch_test_filename(test_filename)
test_filename = sanitize_test_filename(test_filename)
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
test_report_path = os.path.join(test_report_path, test_filename)
build_environment = os.environ.get("BUILD_ENVIRONMENT", "")
if test_filename in PYTEST_FILES and not IS_SANDCASTLE and not (
"cuda" in build_environment and "linux" in build_environment
):
# exclude linux cuda tests because we run into memory issues when running in parallel
import pytest
os.environ["NO_COLOR"] = "1"
os.environ["USING_PYTEST"] = "1"
pytest_report_path = test_report_path.replace('python-unittest', 'python-pytest')
os.makedirs(pytest_report_path, exist_ok=True)
# part of our xml parsing looks for grandparent folder names
pytest_report_path = os.path.join(pytest_report_path, f"{test_filename}.xml")
print(f'Test results will be stored in {pytest_report_path}')
            # macOS is slower with 4 worker processes than with 3
num_procs = 3 if "macos" in build_environment else 4
# f = failed
# E = error
# X = unexpected success
exit_code = pytest.main(args=[inspect.getfile(sys._getframe(1)), f'-n={num_procs}', '-vv', '-x',
'--reruns=2', '-rfEX', f'--junit-xml-reruns={pytest_report_path}'])
del os.environ["USING_PYTEST"]
sanitize_pytest_xml(f'{pytest_report_path}')
print("Skip info is located in the xml test reports, please either go to s3 or the hud to download them")
# exitcode of 5 means no tests were found, which happens since some test configs don't
# run tests from certain files
exit(0 if exit_code == 5 else exit_code)
else:
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print(f'Test results will be stored in {test_report_path}')
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(
output=test_report_path,
verbosity=2 if verbose else 1,
resultclass=XMLTestResultVerbose))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
IS_X86 = platform.machine() in ('x86_64', 'i386')
IS_ARM64 = platform.machine() == 'arm64'
def is_avx512_vnni_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "vnni" in lines
IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported()
if IS_WINDOWS:
@contextmanager
def TemporaryFileName(*args, **kwargs):
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
if 'delete' in kwargs:
if kwargs['delete'] is not False:
raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.")
else:
kwargs['delete'] = False
f = tempfile.NamedTemporaryFile(*args, **kwargs)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName(*args, **kwargs):
with tempfile.NamedTemporaryFile(*args, **kwargs) as f:
yield f.name
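# Example usage of TemporaryFileName (an illustrative sketch):
#
#   with TemporaryFileName() as fname:
#       torch.save(torch.ones(3), fname)
#       t = torch.load(fname)
#
# On Windows the file is closed and removed manually as described above; on other
# platforms NamedTemporaryFile handles cleanup.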
if IS_WINDOWS:
@contextmanager
def TemporaryDirectoryName(suffix=None):
# On Windows the directory created by TemporaryDirectory is likely to be removed prematurely,
# so we first create the directory using mkdtemp and then remove it manually
try:
dir_name = tempfile.mkdtemp(suffix=suffix)
yield dir_name
finally:
shutil.rmtree(dir_name)
else:
@contextmanager # noqa: T484
def TemporaryDirectoryName(suffix=None):
with tempfile.TemporaryDirectory(suffix=suffix) as d:
yield d
IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8'
def _check_module_exists(name: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
try:
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
except ImportError:
return False
TEST_NUMPY = _check_module_exists('numpy')
TEST_FAIRSEQ = _check_module_exists('fairseq')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_CUDA = torch.cuda.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa') and not IS_ARM64
BUILD_WITH_CAFFE2 = torch.onnx._CAFFE2_ATEN_FALLBACK
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
# See #64427
TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
# Enables crossref tests, in addition to standard tests which
# are being run. crossref tests work by installing a torch
# function mode that runs extra compute alongside the regular
# computation that happens with the test. After both computations
# are done, we cross-reference them (thus the name) to check for
# correctness, before throwing out the extra compute and proceeding
# as we had before. By default, we don't run these tests.
TEST_WITH_CROSSREF = os.getenv('PYTORCH_TEST_WITH_CROSSREF', '0') == '1'
def skipIfCrossRef(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_CROSSREF:
raise unittest.SkipTest("test doesn't currently with crossref")
else:
fn(*args, **kwargs)
return wrapper
class CrossRefMode(torch.overrides.TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
r = func(*args, **kwargs)
return r
# Run PyTorch tests with TorchDynamo
TEST_WITH_TORCHDYNAMO = os.getenv('PYTORCH_TEST_WITH_DYNAMO') == '1'
if TEST_WITH_TORCHDYNAMO:
import torchdynamo
# torchdynamo.config.trace = True
# torchdynamo.config.debug = True
torchdynamo.config.print_internal_exceptions = False
# TODO - Collect errors with fake tensors
torchdynamo.config.fake_tensor_propagation = False
# Do not spend time on helper functions that are called with different inputs
torchdynamo.config.cache_size_limit = 8
def skipIfTorchDynamo(msg="test doesn't currently work with torchdynamo"):
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_TORCHDYNAMO:
raise unittest.SkipTest(msg)
else:
fn(*args, **kwargs)
return wrapper
return decorator
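# Example usage of skipIfTorchDynamo (an illustrative sketch; the test name and
# message are hypothetical):
#
#   @skipIfTorchDynamo("graph breaks make this test meaningless")
#   def test_something(self):
#       ...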
# Determine whether to enable cuda memory leak check.
# CUDA mem leak check is expensive and thus we don't want to execute it on every
# test case / configuration.
# If this is True then CUDA memory leak checks are skipped. If this is false
# then CUDA memory leak checks are performed.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'
# True if CI is running TBB-enabled Pytorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
# numpy dtypes like np.float64 are not instances, but rather classes. This leads to rather absurd cases like
# np.float64 != np.dtype("float64") but np.float64 == np.dtype("float64").type.
# Especially when checking against a reference we can't be sure which variant we get, so we simply try both.
def numpy_to_torch_dtype(np_dtype):
try:
return numpy_to_torch_dtype_dict[np_dtype]
except KeyError:
return numpy_to_torch_dtype_dict[np_dtype.type]
def has_corresponding_torch_dtype(np_dtype):
try:
numpy_to_torch_dtype(np_dtype)
return True
except KeyError:
return False
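# Illustrative behavior of the helpers above (a sketch):
#
#   numpy_to_torch_dtype(np.float64)                   # -> torch.float64
#   numpy_to_torch_dtype(np.dtype("float64"))          # -> torch.float64 (dtype instances work too)
#   has_corresponding_torch_dtype(np.dtype("uint16"))  # -> False, torch has no uint16 equivalent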
if IS_WINDOWS:
# Size of `np.intc` is platform defined.
# It is returned by functions like `bitwise_not`.
# On Windows `int` is 32-bit
# https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
numpy_to_torch_dtype_dict[np.intc] = torch.int
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
torch_to_numpy_dtype_dict.update({
torch.bfloat16: np.float32,
torch.complex32: np.complex64
})
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
def skipIfMps(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if torch.backends.mps.is_available():
raise unittest.SkipTest("test doesn't currently work with MPS")
else:
fn(*args, **kwargs)
return wrapper
# Skips a test on CUDA if ROCm is available and its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if TEST_WITH_ROCM:
rocm_version = str(torch.version.hip)
rocm_version = rocm_version.split("-")[0] # ignore git sha
rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
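# Example usage of skipIfRocmVersionLessThan (an illustrative sketch; the test name is
# hypothetical):
#
#   @skipIfRocmVersionLessThan((5, 0))
#   def test_needs_recent_rocm(self):
#       ...
#
# The test still runs on non-ROCm builds and on ROCm builds with version >= 5.0.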
def skipIfNotMiopenSuggestNHWC(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
else:
fn(*args, **kwargs)
return wrapper
# Context manager for setting the deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
def __init__(self, deterministic, *, warn_only=False):
self.deterministic = deterministic
self.warn_only = warn_only
def __enter__(self):
self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled()
torch.use_deterministic_algorithms(
self.deterministic,
warn_only=self.warn_only)
def __exit__(self, exception_type, exception_value, traceback):
torch.use_deterministic_algorithms(
self.deterministic_restore,
warn_only=self.warn_only_restore)
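# Example usage of DeterministicGuard (an illustrative sketch):
#
#   with DeterministicGuard(True):
#       ...  # code here runs with torch.use_deterministic_algorithms(True)
#   # the previous deterministic setting is restored on exit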
# Context manager for setting the CUDA sync debug mode and resetting it
# to its original value.
# We are not exposing it to the core because sync debug mode is
# global and thus not thread safe.
class CudaSyncGuard:
def __init__(self, sync_debug_mode):
self.mode = sync_debug_mode
def __enter__(self):
self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
torch.cuda.set_sync_debug_mode(self.mode)
def __exit__(self, exception_type, exception_value, traceback):
torch.cuda.set_sync_debug_mode(self.debug_mode_restore)
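# Example usage of CudaSyncGuard (an illustrative sketch; "warn" is one of the values
# accepted by torch.cuda.set_sync_debug_mode):
#
#   with CudaSyncGuard("warn"):
#       ...  # synchronizing CUDA calls emit warnings inside this block
#   # the previous sync debug mode is restored on exit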
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DeterministicGuard(
torch.are_deterministic_algorithms_enabled(),
warn_only=torch.is_deterministic_algorithms_warn_only_enabled()):
class CuBLASConfigGuard:
cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
def __enter__(self):
self.is_cuda10_2_or_higher = (
(torch.version.cuda is not None)
and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
if self.is_cuda10_2_or_higher:
self.cublas_config_restore = os.environ.get(self.cublas_var_name)
os.environ[self.cublas_var_name] = ':4096:8'
def __exit__(self, exception_type, exception_value, traceback):
if self.is_cuda10_2_or_higher:
cur_cublas_config = os.environ.get(self.cublas_var_name)
if self.cublas_config_restore is None:
if cur_cublas_config is not None:
del os.environ[self.cublas_var_name]
else:
os.environ[self.cublas_var_name] = self.cublas_config_restore
with CuBLASConfigGuard():
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoXNNPACK(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch.backends.xnnpack.enabled:
raise unittest.SkipTest('XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
    This will check if 'MyOp' is registered in `caffe2.python.core._REGISTERED_OPERATORS`.
"""
if not BUILD_WITH_CAFFE2:
return unittest.skip("Pytorch is compiled without Caffe2")
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def _decide_skip_caffe2(expect_caffe2, reason):
def skip_dec(func):
@wraps(func)
def wrapper(self):
if torch.onnx._CAFFE2_ATEN_FALLBACK != expect_caffe2:
raise unittest.SkipTest(reason)
return func(self)
return wrapper
return skip_dec
skipIfCaffe2 = _decide_skip_caffe2(False, "Not compatible with Caffe2")
skipIfNoCaffe2 = _decide_skip_caffe2(True, "Caffe2 is not available")
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
raise unittest.SkipTest("test require SciPy, but SciPy not found")
else:
fn(*args, **kwargs)
return wrapper
def skipIfTBB(message="This test makes TBB sad"):
def dec_fn(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if IS_TBB:
raise unittest.SkipTest(message)
else:
fn(*args, **kwargs)
return wrapper
return dec_fn
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
def slowAwareTest(fn):
fn.__dict__['slow_test'] = True
return fn
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_memory_leak_check', True): # if current True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_non_default_stream', True): # if current True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.dtype, obj.dtype)
with torch.no_grad():
res = obj.clone().to(dtype=t, device="cuda")
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextmanager
def disable_functorch():
guard = torch._C._DisableFuncTorch() # type: ignore[attr-defined]
try:
yield
finally:
del guard
@contextlib.contextmanager
def freeze_rng_state():
# no_dispatch needed for test_composite_compliance
# Some OpInfos use freeze_rng_state for rng determinism, but
# test_composite_compliance overrides dispatch for all torch functions
# which we need to disable to get and set rng state
with no_dispatch(), disable_functorch():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
try:
yield
finally:
# Modes are not happy with torch.cuda.set_rng_state
# because it clones the state (which could produce a Tensor Subclass)
# and then grabs the new tensor's data pointer in generator.set_state.
#
# In the long run torch.cuda.set_rng_state should probably be
# an operator.
with no_dispatch(), disable_functorch():
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
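# Example usage of freeze_rng_state (an illustrative sketch):
#
#   with freeze_rng_state():
#       a = torch.rand(3)
#   with freeze_rng_state():
#       b = torch.rand(3)
#   # a and b are equal because the RNG state is restored after each block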
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
try:
yield
finally:
torch.set_default_dtype(saved_dtype)
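# Example usage of set_default_dtype (an illustrative sketch):
#
#   with set_default_dtype(torch.double):
#       assert torch.empty(1).dtype is torch.float64
#   # the previous default dtype is restored on exit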
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def is_iterable_of_tensors(iterable, include_empty=False):
""" Returns True if iterable is an iterable of tensors and False o.w.
If the iterable is empty, the return value is :attr:`include_empty`
"""
# Tensor itself is iterable so we check this first
if isinstance(iterable, torch.Tensor):
return False
try:
if len(iterable) == 0:
return include_empty
for t in iter(iterable):
if not isinstance(t, torch.Tensor):
return False
except TypeError as te:
return False
return True
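# Illustrative behavior of is_iterable_of_tensors (a sketch):
#
#   is_iterable_of_tensors([torch.ones(1), torch.zeros(2)])  # True
#   is_iterable_of_tensors(torch.ones(3))                    # False, a tensor itself is excluded
#   is_iterable_of_tensors([], include_empty=True)           # True
#   is_iterable_of_tensors([torch.ones(1), 1.0])             # False, contains a non-tensor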
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
# Stores CUDA memory data provided by PyTorch's caching allocator and
# the CUDA driver.
#
# NOTE: The undocumented torch.cuda.mem_get_info() returns
# (#free bytes, #total bytes available) on the GPU
def __enter__(self):
self.caching_allocator_befores = []
self.driver_befores = []
# Performs a gc if required (required if any CUDA memory is held)
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
# NOTE: gc is based exclusively on caching allocator memory
# because the driver will always have some bytes in use (context size?)
if caching_allocator_mem_allocated > 0:
gc.collect()
torch.cuda.empty_cache()
break
# Acquires caching allocator and driver statistics before the test is run
for i in range(num_devices):
self.caching_allocator_befores.append(torch.cuda.memory_allocated(i))
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
self.driver_befores.append(driver_mem_allocated)
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
# Compares caching allocator before/after statistics
# An increase in allocated memory is a discrepancy indicating a possible
# memory leak
discrepancy_detected = False
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
discrepancy_detected = True
break
# Short-circuits if no discrepancy detected
if not discrepancy_detected:
return
# Validates the discrepancy persists after garbage collection and
# is confirmed by the driver API
        # NOTE: driver API discrepancies alone are ignored because, with the jiterator,
# some tests may permanently increase the CUDA context size and
# that will appear as a driver memory leak but is the expected behavior.
# GCs and clears the cache
gc.collect()
torch.cuda.empty_cache()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
caching_allocator_discrepancy = False
driver_discrepancy = False
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
caching_allocator_discrepancy = True
if driver_mem_allocated > self.driver_befores[i]:
driver_discrepancy = True
if caching_allocator_discrepancy and not driver_discrepancy:
# Just raises a warning if the leak is not validated by the
# driver API
# NOTE: this may be a problem with how the caching allocator collects its
# statistics or a leak too small to trigger the allocation of an
# additional block of memory by the CUDA driver
msg = ("CUDA caching allocator reports a memory leak not "
"verified by the driver API in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
warnings.warn(msg)
elif caching_allocator_discrepancy and driver_discrepancy:
# A caching allocator discrepancy validated by the driver API is a
# failure (except on ROCm, see below)
msg = ("CUDA driver API confirmed a leak in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
# See #62533
# ROCM: Sometimes the transient memory is reported as leaked memory
if TEST_WITH_ROCM:
warnings.warn(msg)
else:
raise RuntimeError(msg)
@contextmanager
def skip_exception_type(exc_type):
try:
yield
except exc_type as e:
raise unittest.SkipTest(f"not implemented: {e}") from e
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
)
except ImportError:
    print('Failed to import hypothesis in common_utils; tests are not derandomized')
# Used in check_if_enable to see if a test method should be disabled by an issue;
# sanitizes a test method name by stripping the device/dtype suffixes appended by @dtypes parametrization.
# E.g., an issue with title "DISABLED test_bitwise_ops (__main__.TestBinaryUfuncs)" should
# disable ALL parametrized test_bitwise_ops tests, such as test_bitwise_ops_cuda_int32
def remove_device_and_dtype_suffixes(test_name: str) -> str:
# import statement is localized to avoid circular dependency issues with common_device_type.py
from torch.testing._internal.common_device_type import get_device_type_test_bases
device_suffixes = [x.device_type for x in get_device_type_test_bases()]
dtype_suffixes = [str(dt)[len("torch."):] for dt in get_all_dtypes()]
test_name_chunks = test_name.split("_")
if len(test_name_chunks) > 0 and test_name_chunks[-1] in dtype_suffixes:
if len(test_name_chunks) > 1 and test_name_chunks[-2] in device_suffixes:
return "_".join(test_name_chunks[0:-2])
return "_".join(test_name_chunks[0:-1])
return test_name
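# Illustrative behavior of remove_device_and_dtype_suffixes (a sketch, assuming "cuda"
# is among the registered device types and "int32" among the dtype suffixes):
#
#   remove_device_and_dtype_suffixes("test_bitwise_ops_cuda_int32")  # -> "test_bitwise_ops"
#   remove_device_and_dtype_suffixes("test_bitwise_ops_int32")       # -> "test_bitwise_ops"
#   remove_device_and_dtype_suffixes("test_bitwise_ops")             # -> "test_bitwise_ops"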
def check_if_enable(test: unittest.TestCase):
test_suite = str(test.__class__).split('\'')[1]
if "USING_PYTEST" in os.environ:
test_suite = f"__main__.{test_suite.split('.')[1]}"
raw_test_name = f'{test._testMethodName} ({test_suite})'
if raw_test_name in json.loads(os.environ.get("SLOW_TESTS_DICT", "{}")):
getattr(test, test._testMethodName).__dict__['slow_test'] = True
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
sanitized_test_method_name = remove_device_and_dtype_suffixes(test._testMethodName)
if not IS_SANDCASTLE:
disabled_tests_dict = {}
if "DISABLED_TESTS_DICT" in os.environ:
disabled_tests_dict = json.loads(os.environ["DISABLED_TESTS_DICT"])
elif IMPORT_DISABLED_TESTS and os.path.exists(IMPORT_DISABLED_TESTS):
with open(IMPORT_DISABLED_TESTS, 'r') as fp:
disabled_tests_dict = json.loads(fp.read())
else:
# IMPORT_DISABLED_TESTS can be None here
            print(f'[WARNING] Failed to load {IMPORT_DISABLED_TESTS}, no tests will be skipped')
for disabled_test, (issue_url, platforms) in disabled_tests_dict.items():
disable_test_parts = disabled_test.split()
if len(disable_test_parts) > 1:
disabled_test_name = disable_test_parts[0]
disabled_test_suite = disable_test_parts[1][1:-1]
# if test method name or its sanitized version exactly matches the disabled test method name
# AND allow non-parametrized suite names to disable parametrized ones (TestSuite disables TestSuiteCPU)
if (test._testMethodName == disabled_test_name or sanitized_test_method_name == disabled_test_name) \
and disabled_test_suite in test_suite:
platform_to_conditional: Dict = {
"mac": IS_MACOS,
"macos": IS_MACOS,
"win": IS_WINDOWS,
"windows": IS_WINDOWS,
"linux": IS_LINUX,
"rocm": TEST_WITH_ROCM,
"asan": TEST_WITH_ASAN
}
if platforms == [] or any([platform_to_conditional[platform] for platform in platforms]):
skip_msg = f"Test is disabled because an issue exists disabling it: {issue_url}" \
f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " \
"If you're seeing this on your local machine and would like to enable this test, " \
"please make sure CI is not set and you are not using the flag --import-disabled-tests."
raise unittest.SkipTest(skip_msg)
if TEST_SKIP_FAST:
if not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# `TestCase.assertEqual` is very permissive and coerces the inputs into a format that can be compared. This is very
# convenient when writing tests, but not so much while reviewing them. By default, the comparison `Pair` framework of
# `torch.testing._comparison.assert_equal`, used for example by the public testing function
# `torch.testing.assert_close`, is more strict. In order to use the same framework and thus reduce the divergence
# between internal and external comparison logic as much as possible, we define some "relaxed" pairs here. They only
# change the supported inputs, but the comparison logic is the same.
# TODO: Revisit the relaxed pairs and check how much work it is to fix the tests that would fail without the relaxation.
class RelaxedBooleanPair(BooleanPair):
"""Pair for boolean-like inputs.
In contrast to the builtin :class:`BooleanPair`, this class also supports one input being a number or a single
element tensor-like.
"""
_supported_number_types = NumberPair(0, 0)._supported_types
def _process_inputs(self, actual, expected, *, id):
        # We require at least one of the inputs to be a boolean, while the other can also be a boolean, a
        # number, or a single element tensor or array, whereas the default BooleanPair requires both inputs to be booleans.
tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray)
other_supported_types = (*self._supported_types, *self._supported_number_types, *tensor_or_array_types)
if not (
(isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types))
or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types))
):
raise UnsupportedInputs()
return [self._to_bool(input, id=id) for input in (actual, expected)]
def _to_bool(self, bool_like, *, id):
if isinstance(bool_like, np.number):
return bool(bool_like.item())
elif type(bool_like) in self._supported_number_types:
return bool(bool_like)
elif isinstance(bool_like, (torch.Tensor, np.ndarray)):
numel = bool_like.numel() if isinstance(bool_like, torch.Tensor) else bool_like.size
if numel > 1:
raise ErrorMeta(
ValueError,
f"Only single element tensor-likes can be compared against a boolean. "
f"Got {numel} elements instead.",
id=id,
)
return bool(bool_like.item())
else:
return super()._to_bool(bool_like, id=id)
class RelaxedNumberPair(NumberPair):
"""Pair for number-like inputs.
In contrast to the builtin :class:`NumberPair`, this class also supports one input being a single element
tensor-like or a :class:`enum.Enum`. (D)Type checks are disabled, meaning comparing 1 to 1.0 succeeds even when
``check_dtype=True`` is passed.
In addition, this class uses looser default tolerances for :class:`float` and :class:`complex` inputs. Also
supports overriding the absolute and relative tolerance through the ``@precisionOverride`` and
``@toleranceOverride`` decorators.
"""
_TYPE_TO_DTYPE = {
int: torch.int64,
float: torch.float32,
complex: torch.complex64,
}
def __init__(
self, actual, expected, *, rtol_override=0.0, atol_override=0.0, check_dtype=None, **other_parameters
) -> None:
super().__init__(actual, expected, check_dtype=False, **other_parameters)
self.rtol = max(self.rtol, rtol_override)
self.atol = max(self.atol, atol_override)
def _process_inputs(self, actual, expected, *, id):
        # We require at least one of the inputs to be a number, while the other can also be a number or a single
        # element tensor or array, whereas the default NumberPair requires both inputs to be numbers.
tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray)
other_supported_types = (*self._supported_types, *tensor_or_array_types)
if not (
(isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types))
or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types))
):
raise UnsupportedInputs()
return [self._to_number(input, id=id) for input in (actual, expected)]
def _to_number(self, number_like, *, id):
if isinstance(number_like, (torch.Tensor, np.ndarray)):
numel = number_like.numel() if isinstance(number_like, torch.Tensor) else number_like.size
if numel > 1:
raise ErrorMeta(
ValueError,
f"Only single element tensor-likes can be compared against a number. "
f"Got {numel} elements instead.",
id=id,
)
number = number_like.item()
if isinstance(number, bool):
number = int(number)
return number
elif isinstance(number_like, Enum):
return int(number_like) # type: ignore[call-overload]
else:
return super()._to_number(number_like, id=id)
class TensorOrArrayPair(TensorLikePair):
"""Pair for tensor-like inputs.
On the one hand this class is stricter than the builtin :class:`TensorLikePair` since it only allows instances of
    :class:`torch.Tensor` and :class:`numpy.ndarray` rather than allowing any tensor-like that can be converted into a
tensor. On the other hand this class is looser since it converts all inputs into tensors with no regard of their
relationship, e.g. comparing a :class:`torch.Tensor` to :class:`numpy.ndarray` is fine.
In addition, this class supports overriding the absolute and relative tolerance through the ``@precisionOverride``
and ``@toleranceOverride`` decorators.
"""
def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters):
super().__init__(actual, expected, **other_parameters)
self.rtol = max(self.rtol, rtol_override)
self.atol = max(self.atol, atol_override)
def _process_inputs(self, actual, expected, *, id, allow_subclasses):
self._check_inputs_isinstance(actual, expected, cls=(torch.Tensor, np.ndarray))
actual, expected = [self._to_tensor(input) for input in (actual, expected)]
for tensor in (actual, expected):
self._check_supported(tensor, id=id)
return actual, expected
class UnittestPair(Pair):
"""Fallback ABC pair that handles non-numeric inputs.
To avoid recreating the mismatch messages of :meth:`unittest.TestCase.assertEqual`, this pair simply wraps it in
order to use it with the :class:`Pair` "framework" from :func:`assert_equal`.
Define the :attr:`UnittestPair.CLS` in a subclass to indicate which class(es) of the inputs the pair should support.
"""
CLS: Union[Type, Tuple[Type, ...]]
TYPE_NAME: Optional[str] = None
def __init__(self, actual, expected, **other_parameters):
self._check_inputs_isinstance(actual, expected, cls=self.CLS)
super().__init__(actual, expected, **other_parameters)
def compare(self):
test_case = unittest.TestCase()
try:
return test_case.assertEqual(self.actual, self.expected)
except test_case.failureException as error:
msg = str(error)
type_name = self.TYPE_NAME or (self.CLS if isinstance(self.CLS, type) else self.CLS[0]).__name__
raise self._make_error_meta(AssertionError, f"{type_name.title()} comparison failed: {msg}")
class StringPair(UnittestPair):
CLS = string_classes
TYPE_NAME = "string"
class SetPair(UnittestPair):
CLS = set
class TypePair(UnittestPair):
CLS = type
class ObjectPair(UnittestPair):
CLS = object
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, seems low risk to inherit from.
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext):
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None and issubclass(exc_type, NotImplementedError):
self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined]
return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
old_val = torch.is_warn_always_enabled()
torch.set_warn_always(new_val)
try:
yield
finally:
torch.set_warn_always(old_val)
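# Example usage of set_warn_always_context (an illustrative sketch):
#
#   with set_warn_always_context(True):
#       ...  # torch warnings that are normally shown only once are always emitted here
#   # the previous warn_always setting is restored on exit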
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
    # Checker to terminate the test suite early if an unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
            # A CUDA device-side error will cause subsequent test cases to fail.
            # Stop the entire test suite if a RuntimeError is caught during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
except RuntimeError as rte:
print("TEST SUITE EARLY TERMINATION due to torch.cuda.synchronize() failure", file=sys.stderr)
return True
return False
else:
return False
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
@property
def rel_tol(self) -> float:
return self._rel_tol
@rel_tol.setter
def rel_tol(self, prec: float) -> None:
self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
# A policy is a zero-argument function that returns a context manager.
# We don't take the context manager directly as it may be necessary to
# construct it once per test method
def wrap_method_with_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
        # NOTE: Python Exceptions (e.g., unittest.Skip) keep objects in scope
# alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
# Recursive function that incorporates retry logic when PYTORCH_RETRY_TEST_CASES=1 and enables early test
# termination. [DISCLAIMER: ONLY WORKS WITH UNITTEST]
# When report_only is True, flaky tests are only reported, but the signal remains the same (the test will still
# show up red).
# Otherwise, the flaky test will show up green while its stats are captured by test reports.
def _run_with_retry(self, result=None, num_runs_left=0, report_only=True, num_red=0, num_green=0):
using_unittest = isinstance(result, unittest.TestResult)
if num_runs_left == 0:
if num_green > 0 and num_red > 0 and using_unittest:
result.addSkip(self, f'{{"flaky": {True}, "num_red": {num_red}, "num_green": {num_green},' +
f'"max_num_retries": {MAX_NUM_RETRIES}}}')
return
if using_unittest:
failures_before = 0 if result is None else len(result.failures) # num tests marked as failed before starting
errors_before = 0 if result is None else len(result.errors) # num tests marked as errored before starting
if TEST_WITH_TORCHDYNAMO:
with torchdynamo.optimize("eager"):
super().run(result=result)
# TODO - Reset for each test slows down testing significantly.
# torchdynamo.reset()
else:
super().run(result=result)
# Early terminate test if necessary.
if self._should_stop_test_suite():
if result.wasSuccessful():
case = TestCase()
if TEST_SAVE_XML is not None:
                    # This is a bit hacky: XMLRunner modifies the expected type from TestCase to TestInfo.
# Create dummy TestInfo to record results correctly
from xmlrunner.result import _TestInfo # type: ignore[import]
case = _TestInfo(result, case)
case.output = _TestInfo.ERROR
case.elapsed_time = 0.0
case.test_description = "TestSuiteEarlyFailure"
                # This shouldn't really happen, but if it does, add a fake failure.
# For more details see https://github.com/pytorch/pytorch/issues/71973
result.failures.append((case, "TestSuite execution was aborted early"))
assert result.wasSuccessful() is False
result.stop()
if not RETRY_TEST_CASES or not using_unittest:
return
err = sys.exc_info()
num_retries_left = num_runs_left - 1
if failures_before < len(result.failures):
print(f" {self._testMethodName} failed - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
_, traceback_str = result.failures.pop(-1)
print(traceback_str)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only,
num_red=num_red + 1, num_green=num_green)
elif errors_before < len(result.errors):
print(f" {self._testMethodName} errored - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
_, traceback_str = result.errors.pop(-1)
print(traceback_str)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only,
num_red=num_red + 1, num_green=num_green)
elif report_only and num_retries_left < MAX_NUM_RETRIES:
print(f" {self._testMethodName} succeeded - num_retries_left: {num_retries_left}")
result.addUnexpectedSuccess(self)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only,
num_red=num_red, num_green=num_green + 1)
elif not report_only and num_retries_left < MAX_NUM_RETRIES:
# in this case, our test was rerun (as a retry has been used) and it just passed.
# we incur one more recursive call with num_runs_left = 0 to allow for accurate flaky reporting
self._run_with_retry(result=result, num_runs_left=0, report_only=report_only,
num_red=num_red, num_green=num_green + 1)
def run(self, result=None):
with contextlib.ExitStack() as stack:
if TEST_WITH_CROSSREF:
stack.enter_context(CrossRefMode())
num_runs = MAX_NUM_RETRIES + 1 if RETRY_TEST_CASES else 1
self._run_with_retry(
result=result,
num_runs_left=num_runs,
report_only=not OVERRIDE_FLAKY_SIGNAL,
num_red=0,
num_green=0)
def setUp(self):
check_if_enable(self)
set_rng_seed(SEED)
@staticmethod
def _make_crow_indices(n_rows, n_cols, nnz,
*, device, dtype, random=True):
"""Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
the number of specified elements nnz.
If random is True, the column counts of rows are in random
order. Otherwise, the column counts of rows are defined by the
used sampling method.
Sampling method
---------------
The used sampling method was introduced in
https://pearu.github.io/csr_sampling.html, and here we give
only an overall description of the method.
Notice that crow_indices can be defined as cumsum(counts)
where counts is a sequence of non-negative integers satisfying
the following conditions:
len(counts) == n_rows + 1
counts.max() <= n_cols
        where counts[i + 1] is interpreted as the number of specified
elements in the i-th row.
The used sampling method aims at increasing the diversity of
CSR samples, that is, a CSR sample should contain (i) rows
that are all filled, (ii) rows with no elements at all, and
(iii) rows that are partially filled. At the same time and for
the given total number of specified elements (nnz), there
should be minimal preference to rows with a given number of
        elements. To achieve this, the sampling method is built up
using a sawteeth model for counts. In the simplest case, we
would have
counts = arange(n_rows + 1) % (n_cols + 1)
that has equal number of all possible column counts per row.
This formula can be used only for specific input values of
n_rows, n_cols, and nnz. To generalize this model to any
combinations of inputs, the counts model above is extended
with an incomplete sawtooth, and the right and lower
rectangular parts that will guarantee that
counts.sum() == nnz
for any combination of n_rows, n_cols, and nnz. Basically,
we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
that is able to hold a sequence of sawteeth and so-called
final correction, while the external part of the window is
        filled with counts to meet the nnz constraint exactly.
"""
assert 0 <= nnz <= n_rows * n_cols, (nnz, n_rows, n_cols)
def sawteeth(n, m):
# return the total number of counts in the sequence of
# sawteeth where n and m define a window in (n_rows+1,
# n_cols+1) rectangle where the sequence of sawteeth
# perfectly fit.
M = (n_cols - m) * (n_cols - m + 1) // 2
K = (n_rows - n) % (n_cols - m + 1)
return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
# Different from the original method description, here counts
# has leading 0 required by crow_indices:
counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
n = m = 0
N = sawteeth(n, m)
if N and nnz >= max(N, n_cols):
# determine the width of the sawteeth window. We use bisection to solve
# N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
# for n
n_left = n
n_right = n_rows - 1
N_right = sawteeth(n_right, m)
while n_right - n_left > 1:
n_middle = (n_left + n_right) // 2
N_middle = sawteeth(n_middle, m)
if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
n_right, N_right = n_middle, N_middle
else:
n_left = n_middle
n, N = n_right, N_right
# fill the right rectangle with counts:
assert n
counts[-n:].fill_(n_cols)
if N and nnz - n * n_cols >= max(N, n_rows - n):
# determine the height of the sawteeth window. We use bisection to solve
# N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
# for m.
m_left = m
m_right = n_cols - 1
N_right = sawteeth(n, m_right)
while m_right - m_left > 1:
m_middle = (m_left + m_right) // 2
N_middle = sawteeth(n, m_middle)
if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
m_right, N_right = m_middle, N_middle
else:
m_left = m_middle
m, N = m_right, N_right
# fill the bottom rectangle with counts:
assert m
counts[1:n_rows - n + 1].fill_(m)
if N:
# fill the sawteeth window with counts
q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
(n_cols - m) * (n_cols - m + 1) // 2)
p = 1 + q * (n_cols - m + 1)
if sys.version_info >= (3, 8):
k = math.isqrt(2 * r)
else:
# math.isqrt(x) is available starting from Python 3.8.
# Here we use int(math.sqrt(x)) as an approximation
                # that appears to give exact results for all x values
                # less than 2**35; at least, the upper limit of x is
# TBD.
k = int(math.sqrt(2 * r))
if k * (k + 1) > 2 * r:
k -= 1
corr = r - k * (k + 1) // 2
assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle
# sequence of full sawteeth:
counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
# incomplete sawtooth:
counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
else:
# given input does not support sawteeth
p = 1
corr = nnz - n * n_cols - m * (n_rows - n)
# correction that will guarantee counts.sum() == nnz:
counts[p] += corr
if random:
# randomize crow_indices by shuffling the sawteeth
# sequence:
perm = torch.randperm(n_rows, device=counts.device)
counts[1:] = counts[1:][perm]
# compute crow_indices:
crow_indices = counts
crow_indices.cumsum_(dim=0)
return crow_indices.to(device=device)
def genSparseCompressedTensor(self, size, nnz, *, layout, device, dtype, index_dtype, blocksize=(), dense_dims=0):
from operator import mul
from functools import reduce
sparse_dim = 2
assert all(size[d] > 0 for d in range(len(size))) or nnz == 0, 'invalid arguments'
assert len(size) >= sparse_dim
if blocksize:
assert len(blocksize) == 2, (size, blocksize)
assert size[-2 - dense_dims] % blocksize[0] == 0, (size, blocksize)
assert size[-1 - dense_dims] % blocksize[1] == 0, (size, blocksize)
blocksize0, blocksize1 = blocksize
else:
blocksize0 = blocksize1 = 1
size = tuple(size)
dense_size = size[(len(size) - dense_dims):]
def random_sparse_compressed(n_compressed_dims, n_plain_dims, nnz):
compressed_indices = self._make_crow_indices(n_compressed_dims, n_plain_dims, nnz, device=device, dtype=index_dtype)
plain_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_compressed_dims):
count = compressed_indices[i + 1] - compressed_indices[i]
plain_indices[compressed_indices[i]:compressed_indices[i + 1]], _ = torch.sort(
torch.randperm(n_plain_dims, dtype=index_dtype, device=device)[:count])
low = -1 if dtype != torch.uint8 else 0
high = 1 if dtype != torch.uint8 else 2
values = make_tensor((nnz,) + blocksize + dense_size, device=device, dtype=dtype, low=low, high=high)
return values, compressed_indices, plain_indices
batch_shape = size[:-2 - dense_dims]
n_batch = reduce(mul, batch_shape, 1)
if layout in {torch.sparse_csr, torch.sparse_bsr}:
n_compressed_dims, n_plain_dims = size[-2 - dense_dims] // blocksize0, size[-1 - dense_dims] // blocksize1
else:
n_compressed_dims, n_plain_dims = size[-1 - dense_dims] // blocksize1, size[-2 - dense_dims] // blocksize0
blocknnz = nnz // (blocksize0 * blocksize1)
sparse_tensors = [random_sparse_compressed(n_compressed_dims, n_plain_dims, blocknnz) for _ in range(n_batch)]
sparse_tensors_it = map(list, zip(*sparse_tensors))
values = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, blocknnz, *blocksize, *dense_size)
compressed_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
plain_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1)
return torch.sparse_compressed_tensor(compressed_indices, plain_indices,
values, size=size, dtype=dtype, layout=layout, device=device)
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0):
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csr, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=dense_dims)
def genSparseCSCTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0):
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csc, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=0)
def genSparseBSRTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0):
assert len(blocksize) == 2
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsr, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims)
def genSparseBSCTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0):
assert len(blocksize) == 2
return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsc, device=device,
dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
        # Assert that we are not given an impossible combination, where the sparse dims have
        # empty numel but nnz > 0 would require the indices to contain values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
i1 = i[:, :(nnz // 2), ...]
i2 = i[:, :((nnz + 1) // 2), ...]
i = torch.cat([i1, i2], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
# coalesce is only implemented for COO
if t.layout == torch.sparse_coo:
t = t.coalesce()
return t.to_dense()
# Compares a torch function with a reference function for a given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
numpy_sample = sample_input.numpy()
n_inp, n_args, n_kwargs = numpy_sample.input, numpy_sample.args, numpy_sample.kwargs
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False, **kwargs)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, that means test is written wrongly
# and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def assertEqualBroadcasting(self, x, y, *args, **kwargs) -> None:
r"""Tests if tensor x equals to y, if y to be broadcast to x.shape.
"""
if not isinstance(y, Iterable):
# int, float, etc. or different shape tensors
y = torch.ones_like(x) * y
if not isinstance(y, torch.Tensor):
# iterable, but not a tensor
y = torch.ones_like(x) * torch.tensor(y)
return self.assertEqual(x, y, *args, **kwargs)
def assertEqual(
self,
x,
y,
msg: Optional[Union[str, Callable[[str], str]]] = None,
*,
atol: Optional[float] = None,
rtol: Optional[float] = None,
equal_nan=True,
exact_dtype=True,
# TODO: default this to True
exact_device=False,
exact_layout=False,
exact_stride=False,
exact_is_coalesced=False
):
# Hide this function from `pytest`'s traceback
__tracebackhide__ = True
# numpy's dtypes are a superset of what PyTorch supports. In case we encounter an unsupported dtype, we fall
# back to an elementwise comparison. Note that this has to happen here and not for example in
# `TensorOrArrayPair`, since at that stage we can no longer split the array into its elements and perform
# multiple comparisons.
if any(
isinstance(input, np.ndarray) and not has_corresponding_torch_dtype(input.dtype) for input in (x, y)
):
def to_list(input):
return input.tolist() if isinstance(input, (torch.Tensor, np.ndarray)) else list(input)
x = to_list(x)
y = to_list(y)
# When comparing a sequence of numbers to a tensor, we need to convert the sequence to a tensor here.
# Otherwise, the pair origination of `assert_equal` will fail, because the sequence is recognized as container
# that should be checked elementwise while the tensor is not.
elif isinstance(x, torch.Tensor) and isinstance(y, Sequence):
y = torch.as_tensor(y, dtype=x.dtype, device=x.device)
elif isinstance(x, Sequence) and isinstance(y, torch.Tensor):
x = torch.as_tensor(x, dtype=y.dtype, device=y.device)
# If x or y are tensors and nested then we unbind them to a list of tensors this should allow us to compare
# a nested tensor to a nested tensor and a nested tensor to a list of expected tensors
if isinstance(x, torch.Tensor) and x.is_nested:
x = x.unbind()
if isinstance(y, torch.Tensor) and y.is_nested:
y = y.unbind()
assert_equal(
x,
y,
pair_types=(
NonePair,
RelaxedBooleanPair,
RelaxedNumberPair,
TensorOrArrayPair,
StringPair,
SetPair,
TypePair,
ObjectPair,
),
sequence_types=(
Sequence,
torch.storage.TypedStorage,
Sequential,
ModuleList,
ParameterList,
ScriptList,
torch.utils.data.dataset.Subset,
),
mapping_types=(Mapping, ModuleDict, ParameterDict, ScriptDict),
rtol=rtol,
rtol_override=self.rel_tol,
atol=atol,
atol_override=self.precision,
equal_nan=equal_nan,
check_device=exact_device,
check_dtype=exact_dtype,
check_layout=exact_layout,
check_stride=exact_stride,
check_is_coalesced=exact_is_coalesced,
# This emulates unittest.TestCase's behavior if a custom message passed and
# TestCase.longMessage (https://docs.python.org/3/library/unittest.html#unittest.TestCase.longMessage)
# is True (default)
msg=(lambda generated_msg: f"{generated_msg} : {msg}") if isinstance(msg, str) and self.longMessage else msg,
)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override]
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
        # This API is used to simulate the deprecated x.type() == y.type() comparison
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaises(self, expected_exception, *args, **kwargs):
if self._ignore_not_implemented_error:
context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg]
try:
return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr]
finally:
# see https://bugs.python.org/issue23890
context = None
else:
return super().assertRaises(expected_exception, *args, **kwargs)
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
# Verifies that an exception with the type expected_exception and message
# matching the regular expression defined by expected_regex is thrown.
# If the test is instantiated for a non-native device type (like XLA)
# then the message is not validated.
# Checks whether the test is instantiated for a device type by testing
# if the test class has defined the device_type attribute and,
# if so, tests whether the instantiated device type is native or not
if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES: # type: ignore[attr-defined]
# empty string matches any string
expected_regex = ''
if self._ignore_not_implemented_error:
context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg]
expected_exception, self, expected_regex)
return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined]
else:
return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; the AssertionError will catch it
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def assertWarnsOnceRegex(self, category, regex=''):
"""Context manager for code that *must always* warn
This filters expected warnings from the test and fails if
the expected warning is not caught. It uses set_warn_always() to force
TORCH_WARN_ONCE to behave like TORCH_WARN
"""
pattern = re.compile(regex)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
yield
if len(ws) == 0:
self.fail('no warning caught')
self.assertTrue(any([type(w.message) is category for w in ws]))
self.assertTrue(
any([re.match(pattern, str(w.message)) for w in ws]),
f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
# Adjust for producer_version, leave s unmodified
s_tag = re.sub(r'(producer_version): "[0-9.]*"',
r'\1: "CURRENT_VERSION"', s)
f.write(s_tag)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "CURRENT_VERSION"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertAtenOp(self, onnx_model, operator, overload_name=""):
all_aten_nodes = [p for p in onnx_model.graph.node
if p.op_type == "ATen" and p.domain == "org.pytorch.aten"]
self.assertTrue(all_aten_nodes)
for op in all_aten_nodes:
attrs = {attr.name: attr.s.decode() for attr in op.attribute}
if attrs.get("operator") == operator:
break
self.assertEqual(attrs["operator"], operator)
self.assertEqual(attrs.get("overload_name", ""), overload_name)
# run code in subprocess and capture exceptions.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove CI flag since this is a wrapped test process.
# CI flag should be set in the parent process only.
if "CI" in env.keys():
del env["CI"]
(stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError as e:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg) from e
def find_free_port():
"""
Finds an available port and returns that port number.
NOTE: If this function is being used to allocate a port to Store (or
indirectly via init_process_group or init_rpc), it should be used
    in conjunction with the `retry_on_connect_failures` decorator, as there is a potential
    race condition where the allocated port may become unavailable before it can be used.
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
return port
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
"""Reruns a test if the test returns a RuntimeError and the exception
contains one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
n_retries = 10
tries_remaining = n_retries
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if any(connect_error in str(error) for connect_error in connect_errors):
tries_remaining -= 1
if tries_remaining == 0:
raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}")
time.sleep(random.random())
continue
raise
return wrapper
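# Illustrative usage sketch (not part of the original file): applying the decorator above to a
# hypothetical c10d setup helper so that transient "Address already in use" errors are retried.
@retry_on_connect_failures
def _example_create_store_with_retries():
    port = find_free_port()
    # A real test would hand `port` to a Store / init_process_group call that may raise a
    # RuntimeError containing one of the connect_errors strings; the decorator retries it.
    return port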
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e
return f_retry # true decorator
return deco_retry
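# Illustrative usage sketch (not part of the original file): the retry decorator above applied to
# a hypothetical flaky helper; the URL is a placeholder. After `tries` consecutive failures it
# either re-raises or converts the failure into a skip.
@retry(Exception, tries=3, delay=0, skip_after_retries=True)
def _example_flaky_download():
    return download_file('https://example.com/data.bin')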
# FIXME: modernize these to be consistent with make_tensor
# and review including them in torch.testing
# Methods for matrix generation
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, vh = torch.linalg.svd(A, full_matrices=False)
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return (u * s.to(dtype).unsqueeze(-2)) @ vh
def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
"""
Returns a random rectangular matrix (batch of matrices)
with singular values sampled from a Gaussian with
mean `mean` and standard deviation `sigma`.
The smaller the `sigma`, the better conditioned
the output matrix is.
"""
primitive_dtype = {
torch.float: torch.float,
torch.double: torch.double,
torch.cfloat: torch.float,
torch.cdouble: torch.double
}
x = torch.rand(shape, dtype=dtype, device=device)
m = x.size(-2)
n = x.size(-1)
u, _, vh = torch.linalg.svd(x, full_matrices=False)
s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
.sort(-1, descending=True).values.to(dtype)
return (u * s.unsqueeze(-2)) @ vh
# Returns a noncontiguous tensor with the same shape and values as t.
# The noncontiguous tensor is constructed such that elements in the innermost
# dimension are separated by zeros or (whenever possible) nans
# TODO: consider more complicated noncontiguity schemes
def noncontiguous_like(t):
# Short-circuits if t is already noncontiguous
if not t.is_contiguous():
return t
# Special-cases 0-dim tensors
zero_dim = t.ndim == 0
if zero_dim:
t = t.unsqueeze(0)
result = torch.repeat_interleave(t.detach(), 2, dim=-1)
# Choose a "weird" value that won't be accessed
if t.dtype.is_floating_point or t.dtype.is_complex:
value = math.nan
elif t.dtype == torch.bool:
value = True
else:
value = 12
if zero_dim:
result[0] = value
result.set_(result.storage(), 1, (), ())
else:
result[..., 1::2] = value
strides = list(result.stride())
strides[-1] *= 2
result.set_(result.storage(), result.storage_offset(), t.size(), stride=tuple(strides))
result.requires_grad_(t.requires_grad)
return result
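# Illustrative sketch (not part of the original file): the contract of noncontiguous_like above --
# the result compares equal to the input but is laid out noncontiguously in memory.
def _example_noncontiguous_like_properties():
    t = torch.arange(6, dtype=torch.float32).reshape(2, 3)
    nc = noncontiguous_like(t)
    assert not nc.is_contiguous()
    assert torch.equal(nc, t)
    return nc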
# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mT).div_(2)
return A
# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
t = (t + t.mT).div_(2)
return t
def random_hermitian_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mH).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
"""
Returns a batch of random symmetric positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> # xdoctest: +SKIP("undefined variables")
>>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return A @ A.mT
def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
"""
Returns a batch of random Hermitian positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> # xdoctest: +SKIP("undefined variables")
>>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
return A @ A.mH
# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.mT) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
return t @ t.mT + i
def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
"""
Returns a batch of random Hermitian positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> # xdoctest: +SKIP("undefined variables")
>>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device)
# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False):
with torch.no_grad():
t = make_tensor(shape, device=device, dtype=dtype)
u, _, vh = torch.linalg.svd(t, full_matrices=False)
real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype
k = min(shape[-1], shape[-2])
# We choose the singular values to be "around one"
# This is to make the matrix well conditioned
# s = [2, 3, ..., k+1]
s = torch.arange(2, k + 2, dtype=real_dtype, device=device)
# s = [2, -3, 4, ..., (-1)^k k+1]
s[1::2] *= -1.
# 1 + 1/s so that the singular values are in the range [2/3, 3/2]
# This gives a condition number of 9/4, which should be good enough
s.reciprocal_().add_(1.)
# Note that the singular values need not be ordered in an SVD so
        # we don't need to sort s
x = (u * s.to(u.dtype)) @ vh
x.requires_grad_(requires_grad)
return x
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
if A.numel() == 0:
return A
u, _, vh = torch.linalg.svd(A, full_matrices=False)
k = min(rows, columns)
s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
if singular:
# make matrix singular
s[k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0] = 0
return (u * s.unsqueeze(-2)) @ vh
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
    The density of the result approaches the given density as the matrix size
    increases, provided the requested density is relatively small but still
    higher than min(rows, columns)/(rows * columns), so that the result is
    non-singular.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
indices_tensor = torch.tensor(indices)
A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device)
return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
data = dict([((i, i), float(i + 1) / matrix_size)
for i in range(matrix_size)])
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices_tensor = torch.tensor([icoords, jcoords])
return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
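# Illustrative sketch (not part of the original file): verifying the documented property of
# random_sparse_pd_matrix above -- its eigenvalues are arange(1, n + 1) / n, hence it is
# positive definite. The function name and default size are hypothetical.
def _example_check_sparse_pd_spectrum(n=8):
    A = random_sparse_pd_matrix(n, density=0.5)
    eigvals = torch.linalg.eigvalsh(A.to_dense())
    expected = torch.arange(1, n + 1, dtype=eigvals.dtype) / n
    assert torch.allclose(eigvals, expected)
    return eigvals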
# FIXME: remove this by updating test suites using it
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
# FIXME: remove this by updating test suites using it
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
# FIXME: improve load_tests() documentation here
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
# FIXME: document this and move it to test_serialization
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
GRADCHECK_NONDET_TOL = 1e-12
def is_slow_gradcheck_env() -> bool:
return os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "0") == "1"
skipIfSlowGradcheckEnv = unittest.skipIf(
is_slow_gradcheck_env(),
"Tests that don't use gradcheck don't need to run on slow_gradcheck CI"
)
def gradcheck(fn, inputs, **kwargs):
# Wrapper around gradcheck that enables certain keys by default.
# Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and
# forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks
# to be disabled to default for the public-facing api to avoid breaking user code.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck.
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if is_slow_gradcheck_env():
default_values["fast_mode"] = False
for key, value in default_values.items():
        # the default values also override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradcheck(fn, inputs, **kwargs)
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
# Wrapper around gradgradcheck that enables certain keys by default
# See gradcheck above for an explanation of why we need something like this.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if is_slow_gradcheck_env():
default_values["fast_mode"] = False
for key, value in default_values.items():
        # the default values also override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs))
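# Illustrative sketch (not part of the original file): the testing-internal gradcheck wrapper above
# enables fast_mode and check_batched_grad by default, so a typical call is simply:
def _example_gradcheck_usage():
    x = torch.randn(3, dtype=torch.double, requires_grad=True)
    return gradcheck(torch.sin, (x,))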
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_cwd)
# FIXME: delete this
# Using @toleranceOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
# FIXME: move to test_sparse or sparse utils
# This is a wrapper that runs a test twice, once with coalesced=True and once with
# coalesced=False, to cover both coalesced and uncoalesced sparse tensors.
def coalescedonoff(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
f(self, *args, **kwargs, coalesced=True)
f(self, *args, **kwargs, coalesced=False)
return wrapped
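# Illustrative sketch (not part of the original file): coalescedonoff decorates a sparse test
# method that accepts a `coalesced` keyword argument; the test class below is hypothetical.
class _ExampleSparseTest:
    @coalescedonoff
    def test_values(self, device, coalesced):
        # The decorated method is invoked twice, once with coalesced=True and once with False.
        pass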
@contextlib.contextmanager
def disable_gc():
if gc.isenabled():
try:
gc.disable()
yield
finally:
gc.enable()
else:
yield
def find_library_location(lib_name: str) -> Path:
# return the shared library file in the installed folder if exist,
# else the file in the build folder
torch_root = Path(torch.__file__).resolve().parent
path = torch_root / 'lib' / lib_name
if os.path.exists(path):
return path
torch_root = Path(__file__).resolve().parent.parent.parent
return torch_root / 'build' / 'lib' / lib_name
def sandcastle_skip(reason):
"""
Similar to unittest.skip, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
return wrapper
return decorator
def mock_wrapper(method):
"""
Returns a function that calls the real implementation of a method
in addition to passing args to a mock object.
"""
mock = MagicMock()
@wraps(method)
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock # type: ignore[attr-defined]
return wrapper
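# Illustrative sketch (not part of the original file): mock_wrapper records calls on a MagicMock
# while still executing the real method. The Greeter class here is hypothetical.
def _example_mock_wrapper_usage():
    class Greeter:
        def greet(self, name):
            return f"hello {name}"
    Greeter.greet = mock_wrapper(Greeter.greet)
    g = Greeter()
    assert g.greet("world") == "hello world"
    Greeter.greet.mock.assert_called_once_with("world")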
def get_tensors_from(args, kwargs):
""" Returns a set of all Tensor objects in the given args and kwargs. """
return set([arg for arg in args if isinstance(arg, Tensor)] +
[v for v in kwargs.values() if isinstance(v, Tensor)])
# Returns scalar tensor representation of a list of integer byte values
def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device):
dtype_to_ctype: Dict[torch.dtype, Any] = {
torch.int8: ctypes.c_int8,
torch.uint8: ctypes.c_uint8,
torch.int16: ctypes.c_int16,
torch.int32: ctypes.c_int32,
torch.int64: ctypes.c_int64,
torch.bool: ctypes.c_bool,
torch.float32: ctypes.c_float,
torch.complex64: ctypes.c_float,
torch.float64: ctypes.c_double,
torch.complex128: ctypes.c_double,
}
ctype = dtype_to_ctype[dtype]
num_bytes = ctypes.sizeof(ctype)
def check_bytes(byte_list):
for byte in byte_list:
assert 0 <= byte <= 255
if dtype.is_complex:
assert len(byte_list) == (num_bytes * 2)
check_bytes(byte_list)
real = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[:num_bytes])).value
imag = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[num_bytes:])).value
res = real + 1j * imag
else:
assert len(byte_list) == num_bytes
check_bytes(byte_list)
res = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list)).value
return torch.tensor(res, device=device, dtype=dtype)
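# Illustrative sketch (not part of the original file): bytes_to_scalar interprets the byte list in
# native byte order, so on a little-endian machine [1, 0, 0, 0] decodes to the int32 value 1.
def _example_bytes_to_scalar():
    t = bytes_to_scalar([1, 0, 0, 0], torch.int32, 'cpu')
    if sys.byteorder == 'little':
        assert t.item() == 1
    return t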
def sandcastle_skip_if(condition, reason):
"""
Similar to unittest.skipIf, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if condition:
if IS_SANDCASTLE:
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return wrapper
else:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
return decorator
def dtype_name(dtype):
""" Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """
return str(dtype).split('.')[1]
def set_single_threaded_if_parallel_tbb(fn):
"""Set test to be single threaded for parallel tbb.
See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883
"""
if not IS_TBB:
return fn
@wraps(fn)
def wrap_fn(*args, **kwargs):
num_threads = torch.get_num_threads()
torch.set_num_threads(1)
try:
return fn(*args, **kwargs)
finally:
torch.set_num_threads(num_threads)
return wrap_fn
@functools.lru_cache()
def get_cycles_per_ms() -> float:
"""Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
"""
def measure() -> float:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
return cycles_per_ms
# Get 10 values and remove the 2 max and 2 min and return the avg.
    # This is to avoid system disturbances that skew the results, e.g.
# the very first cuda call likely does a bunch of init, which takes
# much longer than subsequent calls.
#
    # Tested on Tesla V100, Quadro GP100, Titan RTX, and RTX 3090 GPUs,
    # and it seems to return stable values. Therefore, we enable caching
# using lru_cache decorator above.
num = 10
vals = []
for _ in range(num):
vals.append(measure())
vals = sorted(vals)
return mean(vals[2 : num - 2])
# OpInfo utils
T = TypeVar('T')
def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T:
"""
Returns the first sample from an iterable of samples, like those returned by OpInfo.
The test will be skipped if no samples are available.
"""
try:
return next(iter(samples))
except StopIteration:
raise unittest.SkipTest('Skipped! Need at least 1 sample input')
# This helper method recursively clones the tensor-type inputs
# of operators tested by OpInfo.
def clone_input_helper(input):
if isinstance(input, torch.Tensor):
return torch.clone(input)
if isinstance(input, Sequence):
return tuple(map(clone_input_helper, input))
return input
@contextmanager
def custom_op(opname, symbolic_fn, opset_version):
"""Context manager/decorator to test ONNX export with custom oeprator"""
try:
register_custom_op_symbolic(opname, symbolic_fn, opset_version)
yield
finally:
unregister_custom_op_symbolic(opname, opset_version)
| pytorch-master | torch/testing/_internal/common_utils.py |
import math
import torch
import torch.nn as nn
class LinearReluFunctionalChild(nn.Module):
def __init__(self, N):
super().__init__()
self.w1 = nn.Parameter(torch.empty(N, N))
self.b1 = nn.Parameter(torch.zeros(N))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = torch.nn.functional.linear(x, self.w1, self.b1)
x = torch.nn.functional.relu(x)
return x
class LinearReluFunctional(nn.Module):
def __init__(self, N):
super().__init__()
self.child = LinearReluFunctionalChild(N)
self.w1 = nn.Parameter(torch.empty(N, N))
self.b1 = nn.Parameter(torch.zeros(N))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = self.child(x)
x = torch.nn.functional.linear(x, self.w1, self.b1)
x = torch.nn.functional.relu(x)
return x
| pytorch-master | torch/testing/_internal/quantization_torch_package_models.py |
r"""Importing this file includes common utility methods for checking quantized
tensors and modules.
"""
import numpy as np
import torch
from contextlib import contextmanager
from torch.testing._internal.common_utils import TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_PPC, IS_MACOS, IS_WINDOWS
supported_qengines = torch.backends.quantized.supported_engines
supported_qengines.remove('none')
# Note: We currently do not run QNNPACK tests on WINDOWS and MACOS as it is flaky. Issue #29326
# QNNPACK is not supported on PPC
# QNNPACK throws ASAN heap-buffer-overflow error.
if 'qnnpack' in supported_qengines and any([IS_PPC, TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_MACOS, IS_WINDOWS]):
supported_qengines.remove('qnnpack')
def _conv_output_shape(input_size, kernel_size, padding, stride, dilation,
output_padding=0):
"""Computes the output shape given convolution parameters."""
return np.floor((input_size + 2 * padding - kernel_size - (kernel_size - 1)
* (dilation - 1)) / stride) + 2 * output_padding + 1
# Quantization references
def _quantize(x, scale, zero_point, qmin=None, qmax=None, dtype=np.uint8):
"""Quantizes a numpy array."""
if qmin is None:
qmin = np.iinfo(dtype).min
if qmax is None:
qmax = np.iinfo(dtype).max
qx = np.round(x / scale + zero_point).astype(np.int64)
qx = np.clip(qx, qmin, qmax)
qx = qx.astype(dtype)
return qx
def _dequantize(qx, scale, zero_point):
"""Dequantizes a numpy array."""
x = (qx.astype(float) - zero_point) * scale
return x
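# Illustrative sketch (not part of the original file): a quantize/dequantize round trip with the
# reference helpers above; the reconstruction error stays within half a quantization step.
def _example_quantize_round_trip():
    x = np.linspace(-1.0, 1.0, num=16).astype(np.float32)
    scale, zero_point = 2.0 / 255, 128
    qx = _quantize(x, scale, zero_point)
    x_hat = _dequantize(qx, scale, zero_point)
    assert np.abs(x - x_hat).max() <= scale / 2 + 1e-6
    return qx, x_hat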
def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8):
"""Requantizes a numpy array, i.e., intermediate int32 or int16 values are
converted back to given type"""
qx = (x * multiplier).round() + zero_point
qx = np.clip(qx, qmin, qmax).astype(qtype)
return qx
def _calculate_dynamic_qparams(X, dtype, reduce_range=False, qscheme=torch.per_tensor_affine):
"""Calculate the dynamic quantization parameters (scale, zero_point)
according to the min and max element of the tensor"""
assert qscheme in (torch.per_tensor_affine, torch.per_tensor_symmetric)
if qscheme == torch.per_tensor_symmetric:
assert dtype == torch.qint8
if isinstance(X, torch.Tensor):
X = X.numpy()
if dtype == torch.qint8:
if reduce_range:
qmin, qmax = -64, 63
else:
qmin, qmax = -128, 127
else: # dtype == torch.quint8
if reduce_range:
qmin, qmax = 0, 127
else:
qmin, qmax = 0, 255
min_val = X.min()
max_val = X.max()
is_symmetric = (qscheme == torch.per_tensor_symmetric)
if min_val == max_val:
scale = 1.0
zero_point = 0
else:
if is_symmetric:
max_val = max(max_val, -min_val)
min_val = -max_val
scale = (max_val - min_val) / (qmax - qmin)
scale = max(scale, np.finfo(np.float32).eps)
zero_point = 0
else:
max_val = max(max_val, 0.0)
min_val = min(min_val, 0.0)
scale = (max_val - min_val) / (qmax - qmin)
scale = max(scale, np.finfo(np.float32).eps)
zero_point = qmin - round(min_val / scale)
zero_point = max(qmin, zero_point)
zero_point = min(qmax, zero_point)
return [float(scale), int(zero_point)]
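# Illustrative sketch (not part of the original file): the dynamic qparams above map the observed
# [min, max] range of the tensor onto the quantized integer range ([0, 255] for quint8).
def _example_dynamic_qparams():
    X = torch.tensor([-1.0, 0.0, 2.0])
    scale, zero_point = _calculate_dynamic_qparams(X, torch.quint8)
    # scale == (2.0 - (-1.0)) / 255, and zero_point == round(1.0 / scale) == 85
    assert abs(scale - 3.0 / 255) < 1e-6
    return scale, zero_point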
def _calculate_dynamic_per_channel_qparams(X, dtype):
"""Calculate the dynamic quantization parameters (scale, zero_point)
according to the min and max element of the tensor"""
if isinstance(X, torch.Tensor):
X = X.numpy()
qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max
n_levels = qmax - qmin
scale = np.zeros(X.shape[0], dtype=np.float64)
zero_point = np.zeros(X.shape[0], dtype=np.int64)
for i in range(zero_point.shape[0]):
min_val = X.min()
max_val = X.max()
if min_val == max_val:
scale[i] = 1.0
zero_point[i] = 0
else:
max_val = max(max_val, 0.0)
min_val = min(min_val, 0.0)
scale[i] = (max_val - min_val) / n_levels
scale[i] = max(scale[i], np.finfo(np.float32).eps)
zero_point[i] = qmin - round(min_val / scale[i])
zero_point[i] = max(qmin, zero_point[i])
zero_point[i] = min(qmax, zero_point[i])
return scale, zero_point
def _snr(x, x_hat):
"""Calculates the signal to noise ratio and returns the signal and noise
power, as well as the SNR in dB.
If the input is a list/tuple this function is called recursively on each
element. The result will have the same nested structure as the inputs.
Args:
x, x_hat: Either a tensor or a nested list/tuple of tensors.
Returns:
signal, noise, SNR(in dB): Either floats or a nested list of floats
"""
if isinstance(x, (list, tuple)):
assert(len(x) == len(x_hat))
res = []
for idx in range(len(x)):
res.append(_snr(x[idx], x_hat[idx]))
return res
if x_hat.is_quantized:
x_hat = x_hat.dequantize()
if x.is_quantized:
x = x.dequantize()
noise = (x - x_hat).norm()
if noise == 0:
return 0.0, float('inf'), float('inf')
signal = x.norm()
snr = signal / noise
snr_db = 20 * snr.log10()
return signal, noise, snr_db
@contextmanager
def override_quantized_engine(qengine):
previous = torch.backends.quantized.engine
torch.backends.quantized.engine = qengine
try:
yield
finally:
torch.backends.quantized.engine = previous
@contextmanager
def override_cpu_allocator_for_qnnpack(qengine_is_qnnpack):
try:
if qengine_is_qnnpack:
torch._C._set_default_mobile_cpu_allocator()
yield
finally:
if qengine_is_qnnpack:
torch._C._unset_default_mobile_cpu_allocator()
# TODO: Update all quantization tests to use this decorator.
# Currently for some of the tests it seems to have inconsistent params
# for fbgemm vs qnnpack.
def override_qengines(qfunction):
def test_fn(*args, **kwargs):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
# qfunction should not return anything.
qfunction(*args, **kwargs)
return test_fn
def qengine_is_fbgemm():
return torch.backends.quantized.engine == 'fbgemm'
def qengine_is_qnnpack():
return torch.backends.quantized.engine == 'qnnpack'
def qengine_is_onednn():
return torch.backends.quantized.engine == 'onednn'
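# Illustrative sketch (not part of the original file): override_quantized_engine temporarily swaps
# the active quantized backend and restores the previous one on exit.
def _example_override_engine():
    if 'fbgemm' in supported_qengines:
        with override_quantized_engine('fbgemm'):
            assert qengine_is_fbgemm()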
# Helper function used to simulate per-channel fake-quant against any axis
def _permute_to_axis_zero(X, axis):
new_axis_list = list(range(X.dim()))
new_axis_list[axis] = 0
new_axis_list[0] = axis
y = X.permute(tuple(new_axis_list))
return y, new_axis_list
# Reference method for fake quantize
# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
dtype = X.dtype
X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
res = torch.zeros_like(X)
for i in range(X.size()[0]):
res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) +
per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i]
out = res.permute(tuple(permute_axis_list))
return out.to(dtype)
# Reference method for the gradient of the fake quantize operator
# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
dtype = X.dtype
X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
Xq = torch.zeros_like(X)
for i in range(X.size()[0]):
Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i])
Xq = Xq.permute(tuple(permute_axis_list))
mask = (Xq >= quant_min) * (Xq <= quant_max)
res = torch.zeros_like(dY)
res[mask] = dY[mask]
return res.to(dtype)
def to_tensor(X, device):
if not isinstance(X, torch.Tensor):
X = torch.tensor(X)
else:
X = X.clone().detach()
return X.to(device=torch.device(device), dtype=torch.float32)
| pytorch-master | torch/testing/_internal/common_quantized.py |
import torch
from copy import deepcopy
from torch.utils._pytree import tree_map
# TODO: Move LoggingTensor here.
from torch.testing._internal.logging_tensor import LoggingTensor
# Base class for wrapper-style tensors.
class WrapperTensor(torch.Tensor):
@staticmethod
def __new__(cls, *args, **kwargs):
t, kwargs = cls.get_wrapper_properties(*args, **kwargs)
if "size" not in kwargs:
size = t.size()
else:
size = kwargs["size"]
del kwargs["size"]
if "dtype" not in kwargs:
kwargs["dtype"] = t.dtype
if "layout" not in kwargs:
kwargs["layout"] = t.layout
if "device" not in kwargs:
kwargs["device"] = t.device
if "requires_grad" not in kwargs:
kwargs["requires_grad"] = False
# Ignore memory_format and pin memory for now as I don't know how to
# safely access them on a Tensor (if possible??)
wrapper = torch.Tensor._make_wrapper_subclass(cls, size, **kwargs)
wrapper._validate_methods()
return wrapper
@classmethod
def get_wrapper_properties(cls, *args, **kwargs):
        # Should return both an example Tensor and a dictionary of kwargs
        # to override any of that example Tensor's properties.
# This is very similar to the `t.new_*(args)` API
raise NotImplementedError("You need to implement get_wrapper_properties")
def _validate_methods(self):
# Skip this if not in debug mode?
# Changing these on the python side is wrong as it would not be properly reflected
# on the c++ side
# This doesn't catch attributes set in the __init__
forbidden_overrides = ["size", "stride", "dtype", "layout", "device", "requires_grad"]
for el in forbidden_overrides:
if getattr(self.__class__, el) is not getattr(torch.Tensor, el):
raise RuntimeError(f"Subclass {self.__class__.__name__} is overwriting the "
f"property {el} but this is not allowed as such change would "
"not be reflected to c++ callers.")
class DiagTensorBelow(WrapperTensor):
@classmethod
def get_wrapper_properties(cls, diag, requires_grad=False):
assert diag.ndim == 1
return diag, {"size": diag.size() + diag.size(), "requires_grad": requires_grad}
def __init__(self, diag, requires_grad=False):
self.diag = diag
handled_ops = {}
# We disable torch function here to avoid any unwanted wrapping of the output
__torch_function__ = torch._C._disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
if not all(issubclass(cls, t) for t in types):
return NotImplemented
# For everything else, call the handler:
fn = cls.handled_ops.get(func.__name__, None)
if fn:
return fn(*args, **kwargs or {})
else:
            # Note that here, because we don't need to provide autograd formulas,
            # we can have a default "fallback" that creates a plain Tensor based
            # on the diag elements and calls the func again.
def unwrap(e):
return e.diag.diag() if isinstance(e, DiagTensorBelow) else e
def wrap(e):
if isinstance(e, torch.Tensor) and e.ndim == 1:
return DiagTensorBelow(e)
if isinstance(e, torch.Tensor) and e.ndim == 2 and e.count_nonzero() == e.diag().count_nonzero():
return DiagTensorBelow(e.diag())
return e
rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
return rs
def __repr__(self):
return super().__repr__(tensor_contents=f"diag={self.diag}")
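# Illustrative sketch (not part of the original file): DiagTensorBelow advertises an N x N shape
# while only storing the diagonal; ops without a special handler fall back to a dense diag matrix
# and the result is re-wrapped when it is diagonal again. The function name is hypothetical.
def _example_diag_tensor_below():
    d = DiagTensorBelow(torch.ones(3))
    assert d.shape == (3, 3)
    out = d + d
    assert isinstance(out, DiagTensorBelow)
    return out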
class SparseTensor(WrapperTensor):
@classmethod
def get_wrapper_properties(cls, size, values, indices, requires_grad=False):
assert values.device == indices.device
return values, {"size": size, "requires_grad": requires_grad}
def __init__(self, size, values, indices, requires_grad=False):
self.values = values
self.indices = indices
def __repr__(self):
return super().__repr__(tensor_contents=f"values={self.values}, indices={self.indices}")
def sparse_to_dense(self):
res = torch.zeros(self.size(), dtype=self.values.dtype)
res[self.indices.unbind(1)] = self.values
return res
@staticmethod
def from_dense(t):
indices = t.nonzero()
values = t[indices.unbind(1)]
return SparseTensor(t.size(), values, indices)
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
func_name = f"{func.__module__}.{func.__name__}"
res = cls._try_call_special_impl(func_name, args, kwargs)
if res is not NotImplemented:
return res
        # Otherwise, use a default implementation that constructs dense
        # tensors and uses them to compute the values.
def unwrap(e):
return e.sparse_to_dense() if isinstance(e, SparseTensor) else e
# Wrap back all Tensors into our custom class
def wrap(e):
# Check for zeros and use that to get indices
return SparseTensor.from_dense(e) if isinstance(e, torch.Tensor) else e
rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
return rs
# To show how things happen later
def __rmul__(self, other):
return super().__rmul__(other)
_SPECIAL_IMPLS = {}
@classmethod
def _try_call_special_impl(cls, func, args, kwargs):
if func not in cls._SPECIAL_IMPLS:
return NotImplemented
return cls._SPECIAL_IMPLS[func](args, kwargs)
# Example non-wrapper subclass that stores extra state.
class NonWrapperTensor(torch.Tensor):
def __new__(cls, data):
t = torch.Tensor._make_subclass(cls, data)
t.extra_state = {
'last_func_called': None
}
return t
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
result = super().__torch_function__(func, types, args, kwargs)
if isinstance(result, cls):
# Do something with the extra state. For the example here, just store the name of the
# last function called (skip for deepcopy so the copy has the same extra state).
if func is torch.Tensor.__deepcopy__:
result.extra_state = deepcopy(args[0].extra_state)
else:
result.extra_state = {
'last_func_called': func.__name__,
}
return result
# new_empty() must be defined for deepcopy to work
def new_empty(self, shape):
return type(self)(torch.empty(shape))
# Class used to store info about subclass tensors used in testing.
class SubclassInfo:
__slots__ = ['name', 'create_fn', 'closed_under_ops']
def __init__(self, name, create_fn, closed_under_ops=True):
self.name = name
self.create_fn = create_fn # create_fn(shape) -> tensor instance
self.closed_under_ops = closed_under_ops
subclass_db = {
torch.Tensor: SubclassInfo(
'base_tensor', create_fn=lambda shape: torch.randn(shape)
),
NonWrapperTensor: SubclassInfo(
'non_wrapper_tensor',
create_fn=lambda shape: NonWrapperTensor(torch.randn(shape))
),
LoggingTensor: SubclassInfo(
'logging_tensor',
create_fn=lambda shape: LoggingTensor(torch.randn(shape))
),
SparseTensor: SubclassInfo(
'sparse_tensor',
create_fn=lambda shape: SparseTensor.from_dense(torch.randn(shape).relu())
),
DiagTensorBelow: SubclassInfo(
'diag_tensor_below',
create_fn=lambda shape: DiagTensorBelow(torch.randn(shape)),
closed_under_ops=False # sparse semantics
),
}
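# Illustrative sketch (not part of the original file): subclass_db maps each subclass under test to
# a factory, so parametrized tests can build instances uniformly. A 1-D shape is used here because
# DiagTensorBelow only accepts 1-D diagonals; the function name is hypothetical.
def _example_iterate_subclass_db(shape=(3,)):
    instances = {info.name: info.create_fn(shape) for info in subclass_db.values()}
    assert isinstance(instances['logging_tensor'], LoggingTensor)
    return instances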
| pytorch-master | torch/testing/_internal/common_subclass.py |
import torch
from torch import Tensor
import contextlib
import itertools
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from functools import partial
from torch.utils._mode_utils import no_dispatch
from torch.utils._python_dispatch import enable_torch_dispatch_mode
import torch.autograd.forward_ad as fwAD
from torch.overrides import enable_reentrant_dispatch
from typing import Callable
import re
def check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor):
elem = wrapper_tensor.elem
metadata_wrapper_tensor = metadata_accessor(wrapper_tensor)
metadata_elem = metadata_accessor(elem)
if metadata_wrapper_tensor == metadata_elem:
return
raise RuntimeError(
f"This operator is not Composite Compliant: the "
f"{metadata_name} of the tensor was modified directly without "
f"going through the PyTorch dispatcher.")
def check_metadata_consistency(wrapper_tensor, CCT):
# CCT: CompositeCompliantTensor class which is generated using generate_cct
if not isinstance(wrapper_tensor, CCT):
return
things_to_check = {
'shape': Tensor.size,
'dtype': lambda x: x.dtype,
'device': lambda x: x.device,
'numel': Tensor.numel,
'stride': Tensor.stride,
'storage_offset': Tensor.storage_offset,
}
for metadata_name, metadata_accessor in things_to_check.items():
check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor)
def is_view_fn(func):
return func.overloadpacket.__name__ in {
'as_strided',
'detach',
'diagonal',
'expand',
'expand_as',
'movedim',
'narrow',
'permute',
'select',
'squeeze',
'transpose',
't',
'real',
'imag',
'view_as_real',
'view_as_complex',
'unflatten',
'unfold',
'unsqueeze',
'view',
'view_as',
'unbind',
'split',
'split_with_sizes',
'vsplit',
'hsplit',
'tensor_split',
'chunk',
'swapaxes',
'slice',
'_reshape_alias',
'_unsafe_view',
'_conj',
'alias',
}
# manually populated from native_functions that have inplace_view: True.
# In the future we will probably be able to grab that list directly
def is_inplace_view_fn(func):
return func.overloadpacket.__name__ in {
'as_strided_',
'detach_',
'squeeze_',
'swapaxes_',
'swapdims_',
't_',
'transpose_',
'unsqueeze_',
}
# Introspection please save us
def is_inplace(func):
name = func.overloadpacket.__name__
if re.match('__i.+__', name):
return True
if re.match('__.+__', name):
return False
return name[-1] == '_'
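# Illustrative sketch (not part of the original file): the name-based heuristics above classify op
# overloads, e.g. a trailing underscore marks an in-place variant.
def _example_inplace_heuristic():
    assert is_inplace(torch.ops.aten.add_.Tensor)
    assert not is_inplace(torch.ops.aten.add.Tensor)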
def generate_cct(enable_recursive_torch_dispatch=False,
autograd_view_consistency=True):
# This function returns a new class CompositeCompliantTensor
# The two arguments control the behaviour described below.
# enable_recursive_torch_dispatch:
# If True, enable __torch_dispatch__ before calling the func in
# CCT's __torch_dispatch__ implementation else call
# the func under `no_dispatch`.
# NOTE: We need to disable dispatch under Torch Dispatch Mode,
# to avoid infinite recursion.
# Also, we need to enable dispatch for checking
# forward_AD composite compliance
# Refer: https://github.com/pytorch/pytorch/issues/75652
# autograd_view_consistency:
# If True, alias result using `set_` if func returns a view
# (See Note [Alias Result]).
# Since Forward AD doesn't work with `set_`
# we disable it by setting alias to False.
class CompositeCompliantTensor(torch.Tensor):
elem: torch.Tensor
__slots__ = ['elem']
__torch_function__ = torch._C._disabled_torch_function_impl
@staticmethod
def __new__(cls, elem, *args, **kwargs):
assert type(elem) is not cls, \
"Wrapping a CompositeCompliantTensor in a CompositeCompliantTensor is not supported"
# The storage of CompositeCompliantTensor should never be used directly
# by a Composite operation; if the Composite
# operator attempts to read from the storage without dispatching then it'll
# raise a RuntimeError due to it being a meta storage.
r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls, elem.size(),
dtype=elem.dtype, layout=elem.layout,
device=elem.device, requires_grad=elem.requires_grad,
strides=elem.stride(), storage_offset=elem.storage_offset())
if elem.requires_grad:
# CompositeCompliantTensor steals the "requires_grad"-ness.
# Why a new copy of `elem`? Because sometimes OpInfo shares inputs between tests...
tmp = torch.empty_strided(elem.shape, elem.stride(), dtype=elem.dtype,
device=elem.device, layout=elem.layout,
requires_grad=False)
tmp.copy_(elem.detach())
r.elem = tmp
else:
r.elem = elem
assert r.stride() == r.elem.stride()
# Propagate conjugate bits to the wrapper tensor
# Ref: https://github.com/albanD/subclass_zoo/issues/24
# Ref: https://github.com/albanD/subclass_zoo/issues/21
torch._C._set_conj(r, r.elem.is_conj())
torch._C._set_neg(r, r.elem.is_neg())
return r
def __repr__(self):
return f"CompositeCompliantTensor({self.elem})"
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
def unwrap(e):
return e.elem if isinstance(e, CompositeCompliantTensor) else e
def wrap(e):
return CompositeCompliantTensor(e) if isinstance(e, torch.Tensor) else e
if func == torch.ops.aten._local_scalar_dense.default:
raise RuntimeError(
".item() is not allowed to be called inside of composite "
"functions in the PyTorch library because not all backends "
"and/or Tensor subclasses (e.g. vmap, ProxyTensor) support them.")
if func.overloadpacket.__name__ in ('set_', 'resize_'):
raise RuntimeError(
f"{func.__name__} is not allowed to be called inside of "
f"Composite operators.")
if is_inplace(func):
# NB: We are making an assumption that if the function is in-place,
# then the first argument is being written to. Introspection please save us!
mutated_argument = args[0]
if not isinstance(mutated_argument, CompositeCompliantTensor) and \
any([isinstance(a, CompositeCompliantTensor) for a in args[1:]]):
raise RuntimeError(
'Not composite compliant: performing in-place operation '
f'{func.__name__} where the Tensor being written to is '
'regular Tensor but the other tensors are Tensor Subclasses. '
'Please try to avoid this in-place operation.')
with enable_reentrant_dispatch():
with contextlib.nullcontext() if enable_recursive_torch_dispatch else no_dispatch():
unwrapped_args = tree_map(unwrap, args)
unwrapped_kwargs = tree_map(unwrap, kwargs)
unwrapped_rs = func(*unwrapped_args, **unwrapped_kwargs)
rs = tree_map(wrap, unwrapped_rs)
if is_view_fn(func) and autograd_view_consistency:
# Note [Alias Result]
# Autograd asserts that for B = A.view_fn(...), B and A's storages
# are the same. Here we try to make B alias A to avoid those asserts.
# See https://github.com/pytorch/pytorch/issues/65339 for more information
# about the issue.
with enable_reentrant_dispatch():
with no_dispatch():
# Idea: this is a weird way of getting a storage that aliases the input.
# This is a workaround for #65339.
# 1. under no_dispatch, all of the wrapper tensors look like regular
# tensors with special storage (the storage is nullptr and
# advertises a CPU/CUDA device).
# 2. we run func, which ends up running the view operation
# 3. All view operations reuse the input's storage and return
# result Tensor(s) with new sizes/strides/offset that alias
# the input.
# 4. we set the storage (and sizes/strides/offset) of the wrapper
# tensor results to be that of the tensors that alias the input
result = func(*args, **kwargs)
if isinstance(result, tuple) or isinstance(result, list):
for a, b in zip(rs, result):
a.set_(b)
else:
rs.set_(result)
# Some operations are allowed to in-place modify the metadata of the
# inputs. The only ones are the "inplace view functions"; when we
# run into these, we manually modify the metadata of the input.
with no_dispatch():
if is_inplace_view_fn(func):
func(*args, **kwargs)
# For each CompositeCompliantTensor t, we check that t and t.elem
# have consistent metadata. If they don't have consistent metadata,
# that means the operator did something fishy.
check = partial(check_metadata_consistency, CCT=cls)
tree_map(check, args)
tree_map(check, kwargs)
tree_map(check, rs)
return rs
return CompositeCompliantTensor
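# Illustrative usage sketch of generate_cct (not executed here; assumes a plain
# CPU tensor as input):
#   >>> CCT = generate_cct()
#   >>> t = CCT(torch.randn(3))
#   >>> isinstance(t, torch.Tensor), t.shape == t.elem.shape
#   (True, True)
# Composite ops that bypass the dispatcher on `t` (e.g. by touching its meta
# storage or mutating metadata directly) raise a RuntimeError, which is what
# the checks in this file rely on.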
def is_tensorlist(lst):
if not isinstance(lst, list) and not isinstance(lst, tuple):
return False
if len(lst) == 0:
return False
all_tensors = all([isinstance(elt, torch.Tensor) for elt in lst])
if all_tensors:
return True
exists_one_tensor = any(isinstance(elt, torch.Tensor) for elt in lst)
if exists_one_tensor:
raise RuntimeError('This test assumes that PyTorch APIs cannot take '
'mixed lists of Tensor and other things')
return False
def maybe_map(fn, should_map, arg):
return fn(arg) if should_map else arg
def wrap(arg, CCT):
# CCT: CompositeCompliantTensor class which is generated using generate_cct
if isinstance(arg, torch.Tensor):
return CCT(arg)
if is_tensorlist(arg):
return [CCT(a) for a in arg]
raise RuntimeError("wrap assumes that the input can be wrapped")
# Given a list of flat arguments, some of which may be Tensors, return all
# possible ways some of the arguments could be CompositeCompliantTensors (CCT).
# For example, given Tensors A, B, C and flat_args = [A, 1, B],
# We would return the following 4 options:
# [CCT(A), 1, CCT(B)]
# [CCT(A), 1, B]
# [A, 1, CCT(B)]
# [A, 1, B]
# NB: Yes, this is exponential. No, we don't care too much because PyTorch ops
# don't accept that many input Tensors.
def generate_subclass_choices(flat_args, CCT):
# CCT: CompositeCompliantTensor class which is generated using generate_cct
is_tensor_likes = [isinstance(arg, torch.Tensor) or is_tensorlist(arg) for arg in flat_args]
subclass_options = [[False, True] if is_tensor_like else [False] for is_tensor_like in is_tensor_likes]
for which_args_are_wrapped in itertools.product(*subclass_options):
result = [maybe_map(partial(wrap, CCT=CCT), should_wrap_arg, arg)
for should_wrap_arg, arg in zip(which_args_are_wrapped, flat_args)]
yield result, which_args_are_wrapped
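# Illustrative sketch of generate_subclass_choices (A and B are hypothetical
# example tensors):
#   >>> CCT = generate_cct()
#   >>> A, B = torch.randn(2), torch.randn(2)
#   >>> masks = [mask for _, mask in generate_subclass_choices([A, 1, B], CCT)]
#   >>> masks   # two wrappable args -> 2 ** 2 combinations
#   [(False, False, False), (False, False, True), (True, False, False), (True, False, True)]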
# For an operation f(*args, **kwargs), each Tensor argument may either be
# a regular Tensor or a Tensor Subclass. This iterator iterates through
# all of those options.
def generate_subclass_choices_args_kwargs(args, kwargs, CCT):
# CCT: CompositeCompliantTensor class which is generated using generate_cct
flat_kwargs, spec = tree_flatten(kwargs)
flat_args_kwargs = list(args) + list(flat_kwargs)
for choice, debug_metadata in generate_subclass_choices(flat_args_kwargs, CCT):
new_args = choice[:len(args)]
new_kwargs = tree_unflatten(choice[len(args):], spec)
which_args_are_wrapped = debug_metadata[:len(args)]
which_kwargs_are_wrapped = tree_unflatten(debug_metadata[len(args):], spec)
yield new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped
def raise_composite_compliance_error(err, additional_info=''):
raise RuntimeError(
"Composite compilance check failed with "
"the above error.\n"
f"{additional_info}"
"If you are adding an OpInfo of an "
"existing operator, please feel free to skip this test "
"because the problem was pre-existing and file an issue. "
"Otherwise, if you added a new operator, please read "
"through the Composite Compliance section in "
"aten/src/ATen/native/README.md for how to resolve this. "
) from err
# This test checks ALL possible permutations of calling `op` with arguments
# that are individually either a regular Tensor or a Tensor subclass.
#
# The general strategy is to wrap some Tensor args and kwargs in
# CompositeCompliantTensor wrappers and call the operation.
# If some composite operation does any non-compliant behavior,
# CompositeCompliantTensor will raise an error.
def check_all_permutations(op, args, kwargs, assert_equal_fn):
CCT = generate_cct()
expected = op(*args, **kwargs)
for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT):
new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice
try:
actual = op(*new_args, **new_kwargs)
# NOTE: [What errors are Composite Compliance trying to catch?]
#
# There's two things we want to catch:
# - errors that would raise within the torch_dispatch impl
# - data_ptr accesses
# The first is easy to filter for (we could make the error a different
# error class), the second is always going to be a RuntimeError due to
# how it is implemented (if you try to access the data_ptr of the
# wrapper Tensor, an internal RuntimeError is raised).
#
# So the most general thing to catch here was RuntimeError. If you
# are here and debugging why your test failed, it's plausible that
# the operator itself is broken and that there are other tests failing.
except RuntimeError as err:
raise_composite_compliance_error(
err,
f"- wrapped_args: {which_args_are_wrapped}\n"
f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
)
def unwrap(e):
return e.elem if isinstance(e, CCT) else e
assert_equal_fn(tree_map(unwrap, actual), expected)
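# Illustrative usage sketch of check_all_permutations (torch.add and
# assert_close are just example choices for the op and comparison function):
#   check_all_permutations(torch.add, (torch.randn(3), torch.randn(3)), {},
#                          assert_equal_fn=torch.testing.assert_close)
# This compares the results of all four wrapping combinations, from
# (Tensor, Tensor) to (CCT, CCT), against the result of a plain eager run.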
# Checks via the usage of torch dispatch mode certain anti-patterns that
# are not composite compliant.
#
# In particular, the anti-pattern we are trying to prevent is a user
# creating an empty tensor and then resize_-ing it. Torch Dispatch Mode helps
# here because all factory functions will create tensors that are
# CompositeCompliantTensor.
#
# The general strategy is to wrap all Tensor args and kwargs in
# CompositeCompliantTensor wrappers. If an operator that is
# Composite does any non-compliant behavior,
# CompositeCompliantTensor will raise an error.
def check_with_mode(op, args, kwargs, assert_equal_fn):
CCT = generate_cct()
def wrap(e):
return CCT(e) if isinstance(e, torch.Tensor) else e
expected = op(*args, **kwargs)
args = tree_map(wrap, args)
kwargs = tree_map(wrap, kwargs)
try:
with enable_torch_dispatch_mode(CCT):
actual = op(*args, **kwargs)
# see NOTE: [What errors are Composite Compliance trying to catch?]
except RuntimeError as err:
raise_composite_compliance_error(err)
def unwrap(e):
return e.elem if isinstance(e, CCT) else e
assert_equal_fn(tree_map(unwrap, actual), expected)
def gather_leaf_tensors(args, kwargs):
leaf_tensors = []
args, args_spec = tree_flatten(args)
kwargs, kwargs_spec = tree_flatten(kwargs)
args = args + kwargs
for arg in args:
if not isinstance(arg, torch.Tensor):
continue
if arg.requires_grad:
leaf_tensors.append(arg)
return leaf_tensors
# Checks if the backward formula is composite compliant by testing
# all possible permutations of {inputs, grad_outputs} being
# CompositeCompliantTensor or regular Tensors.
#
# NB: it is important that op is accepted as a Callable and not an OpInfo,
# this means we can apply check_backward_formula to things that aren't OpInfos
# while debugging.
def check_backward_formula(op: Callable, args, kwargs,
output_process_fn_grad=None,
gradcheck_wrapper=None, assert_equal_fn=None):
CCT = generate_cct()
def compute_expected_grads(args, kwargs):
if gradcheck_wrapper is None:
results = op(*args, **kwargs)
else:
results = gradcheck_wrapper(op, *args, **kwargs)
if output_process_fn_grad is not None:
results = output_process_fn_grad(results)
flat_results, _ = tree_flatten(results)
flat_diff_results = [r for r in flat_results if r.requires_grad]
assert len(flat_diff_results) > 0
grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype)
for r in flat_diff_results]
leaf_tensors = gather_leaf_tensors(args, kwargs)
assert len(leaf_tensors) > 0
return torch.autograd.grad(flat_diff_results, leaf_tensors,
grads, allow_unused=True, retain_graph=True)
expected = compute_expected_grads(args, kwargs)
for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT):
new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice
leaf_tensors = gather_leaf_tensors(new_args, new_kwargs)
assert len(leaf_tensors) > 0
try:
if gradcheck_wrapper is None:
results = op(*new_args, **new_kwargs)
else:
results = gradcheck_wrapper(op, *new_args, **new_kwargs)
if output_process_fn_grad is not None:
results = output_process_fn_grad(results)
# see NOTE: [What errors are Composite Compliance trying to catch?]
except RuntimeError as err:
raise_composite_compliance_error(
err,
f"- wrapped_args: {which_args_are_wrapped}\n"
f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
)
flat_results, _ = tree_flatten(results)
flat_diff_results = [r for r in flat_results if r.requires_grad]
assert len(flat_diff_results) > 0
# NB: ones, not ones_like, so we get a regular Tensor here
grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype)
for r in flat_diff_results]
for flat_new_grads, which_grad_is_wrapped in generate_subclass_choices(grads, CCT):
try:
actual = torch.autograd.grad(flat_diff_results, leaf_tensors, flat_new_grads,
allow_unused=True, retain_graph=True)
# see NOTE: [What errors are Composite Compliance trying to catch?]
except RuntimeError as err:
raise_composite_compliance_error(
err,
f"- wrapped_args: {which_args_are_wrapped}\n"
f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
f"- wrapped_grads: {which_grad_is_batched}\n"
)
def unwrap(e):
return e.elem if isinstance(e, CCT) else e
assert_equal_fn(tuple(map(unwrap, actual)), expected, equal_nan=True)
# Checks if the forward AD formula is composite compliant by testing
# all possible permutations of {primals, tangents} being
# CompositeCompliantTensor or regular Tensors.
#
# NB: it is important that op is accepted as a Callable and not an OpInfo,
# this means we can apply check_forward_ad_formula to things that aren't OpInfos
# while debugging.
def check_forward_ad_formula(op: Callable, args, kwargs, gradcheck_wrapper=None, assert_equal_fn=None):
CCT = generate_cct(enable_recursive_torch_dispatch=True, autograd_view_consistency=False)
def maybe_tangent(t):
assert type(t) is not CCT
# Generate `tangent` tensor
# if the given object is a Tensor with requires_grad set.
if isinstance(t, torch.Tensor) and t.requires_grad:
return torch.randn_like(t)
elif is_tensorlist(t):
return list(torch.randn_like(e) if e.requires_grad else None for e in t)
return None
tangent_args = tuple(maybe_tangent(arg) for arg in args)
flat_kwargs, spec = tree_flatten(kwargs)
flat_tangent_kwargs = tuple(maybe_tangent(arg) for arg in flat_kwargs)
tangent_kwargs = tree_unflatten(flat_tangent_kwargs, spec)
with fwAD.dual_level():
def maybe_make_dual(dual):
# Returns dual tensor if primal is a tensor/tensor subclass
# with requires_grad set.
primal, tangent = dual
if isinstance(primal, torch.Tensor) and primal.requires_grad:
return fwAD.make_dual(primal.detach(), tangent)
elif is_tensorlist(primal):
return tuple(fwAD.make_dual(pri.detach(), tang) if tang is not None else pri
for pri, tang in zip(primal, tangent))
return primal
def compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs):
op_args = tuple(map(maybe_make_dual, zip(args, tangent_args)))
op_kwargs = {k: maybe_make_dual((v, tangent_kwargs[k])) for k, v in kwargs.items()}
if gradcheck_wrapper is None:
return op(*op_args, **op_kwargs)
return gradcheck_wrapper(op, *op_args, **op_kwargs)
expected = compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs)
expected = tree_map(fwAD.unpack_dual, expected)
expected_primals = tree_map(lambda x: x.primal, expected)
expected_tangents = tree_map(lambda x: x.tangent, expected)
# Permutations of arg and kwargs in CCT.
for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT):
new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice
# Permutations tangent arg and tangent kwargs in CCT.
for tang_choice in generate_subclass_choices_args_kwargs(tangent_args, tangent_kwargs, CCT):
new_tang_args, new_tang_kwargs, \
which_tang_args_are_wrapped, which_tang_kwargs_are_wrapped = tang_choice
op_args = tuple(map(maybe_make_dual, zip(new_args, new_tang_args)))
op_kwargs = {k: maybe_make_dual((v, new_tang_kwargs[k])) for k, v in new_kwargs.items()}
try:
if gradcheck_wrapper is None:
actual = op(*op_args, **op_kwargs)
else:
actual = gradcheck_wrapper(op, *op_args, **op_kwargs)
# see NOTE: [What errors are Composite Compliance trying to catch?]
except RuntimeError as err:
raise_composite_compliance_error(
err,
f"- wrapped_args: {which_args_are_wrapped}\n"
f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
f"- wrapped_tangent_args: {which_tang_args_are_wrapped}\n"
f"- wrapped_tangent_kwargs: {which_tang_kwargs_are_wrapped}\n"
)
def unwrap(e):
return e.elem if isinstance(e, CCT) else e
actual = tree_map(fwAD.unpack_dual, actual)
actual_primals = tree_map(lambda x: unwrap(x.primal), actual)
actual_tangents = tree_map(lambda x: unwrap(x.tangent), actual)
assert_equal_fn(actual_primals, expected_primals, equal_nan=True)
assert_equal_fn(actual_tangents, expected_tangents, equal_nan=True)
| pytorch-master | torch/testing/_internal/composite_compliance.py |
import re
import sys
import time
from functools import partial, wraps
from typing import Tuple
import torch.distributed as dist
import torch.distributed.rpc as rpc
from torch.distributed.rpc import _rref_context_get_debug_info
from torch.testing._internal.common_utils import FILE_SCHEMA, TEST_WITH_TSAN
if not dist.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
INIT_METHOD_TEMPLATE = FILE_SCHEMA + "{file_name}"
def dist_init(
old_test_method=None,
setup_rpc: bool = True,
clean_shutdown: bool = True,
faulty_messages=None,
messages_to_delay=None,
):
"""
We use this decorator for setting up and tearing down state since
MultiProcessTestCase runs each `test*` method in a separate process and
each process just runs the `test*` method without actually calling
'setUp' and 'tearDown' methods of unittest.
Note: pass the string representation of MessageTypes that should be used
with the faulty agent's send function. By default, all retriable messages
("RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT", "RREF_USER_DELETE",
"CLEANUP_AUTOGRAD_CONTEXT_REQ") will use the faulty send (this default is
set from faulty_rpc_agent_test_fixture.py).
"""
# If we use dist_init without arguments (ex: @dist_init), old_test_method is
# appropriately set and we return the wrapper appropriately. On the other
# hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)),
# old_test_method is None and we return a functools.partial which is the real
# decorator that is used and as a result we recursively call dist_init with
# old_test_method and the rest of the arguments appropriately set.
if old_test_method is None:
return partial(
dist_init,
setup_rpc=setup_rpc,
clean_shutdown=clean_shutdown,
faulty_messages=faulty_messages,
messages_to_delay=messages_to_delay,
)
@wraps(old_test_method)
def new_test_method(self, *arg, **kwargs):
# Setting _ignore_rref_leak to make sure OwnerRRefs are properly deleted
# in tests.
import torch.distributed.rpc.api as api
api._ignore_rref_leak = False
self.worker_id = self.rank
self.setup_fault_injection(faulty_messages, messages_to_delay)
rpc_backend_options = self.rpc_backend_options
if setup_rpc:
if TEST_WITH_TSAN:
# TSAN runs much slower.
rpc_backend_options.rpc_timeout = rpc.constants.DEFAULT_RPC_TIMEOUT_SEC * 5
rpc.constants.DEFAULT_SHUTDOWN_TIMEOUT = 60
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
return_value = old_test_method(self, *arg, **kwargs)
if setup_rpc:
rpc.shutdown(graceful=clean_shutdown)
return return_value
return new_test_method
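# Illustrative usage sketch (inside an RPC test class derived from
# MultiProcessTestCase; the test bodies below are hypothetical):
#   @dist_init
#   def test_simple_rpc(self):
#       dst = worker_name((self.rank + 1) % self.world_size)
#       ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
#       self.assertEqual(ret, torch.ones(2) + 1)
#
#   @dist_init(setup_rpc=False)
#   def test_manual_setup(self):
#       ...  # this test is responsible for init_rpc/shutdown itself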
def noop() -> None:
pass
def wait_until_node_failure(rank: int, expected_error_regex: str = ".*") -> str:
"""
Loops until an RPC to the given rank fails. This is used to
indicate that the node has failed in unit tests.
Args:
rank (int): Rank of the node expected to fail
expected_error_regex (optional, str): Regex of exception message expected. Useful to ensure a specific failure
occurs, not just any.
"""
while True:
try:
rpc.rpc_sync("worker{}".format(rank), noop, args=())
time.sleep(0.1)
except Exception as e:
if re.search(pattern=expected_error_regex, string=str(e)):
return str(e)
def wait_until_pending_futures_and_users_flushed(timeout: int = 20) -> None:
"""
The RRef protocol holds forkIds of rrefs in a map until those forks are
confirmed by the owner. The message confirming the fork may arrive after
our tests check whether this map is empty, which leads to failures and
flaky tests. to_here also does not guarantee that we have finished
processing the owner's confirmation message for the RRef. This function
loops until the map is empty, which means the messages have been received
and processed. Call this function before asserting that the map returned by
_get_debug_info is empty.
"""
start = time.time()
while True:
debug_info = _rref_context_get_debug_info()
num_pending_futures = int(debug_info["num_pending_futures"])
num_pending_users = int(debug_info["num_pending_users"])
if num_pending_futures == 0 and num_pending_users == 0:
break
time.sleep(0.1)
if time.time() - start > timeout:
raise ValueError(
"Timed out waiting to flush pending futures and users, had {} pending futures and {} pending users".format(
num_pending_futures, num_pending_users
)
)
def get_num_owners_and_forks() -> Tuple[str, str]:
"""
Retrieves number of OwnerRRefs and forks on this node from
_rref_context_get_debug_info.
"""
rref_dbg_info = _rref_context_get_debug_info()
num_owners = rref_dbg_info["num_owner_rrefs"]
num_forks = rref_dbg_info["num_forks"]
return num_owners, num_forks
def wait_until_owners_and_forks_on_rank(
num_owners: int, num_forks: int, rank: int, timeout: int = 20
) -> None:
"""
Waits until timeout for num_forks and num_owners to exist on the rank. Used
to ensure proper deletion of RRefs in tests.
"""
start = time.time()
while True:
num_owners_on_rank, num_forks_on_rank = rpc.rpc_sync(
worker_name(rank), get_num_owners_and_forks, args=(), timeout=5
)
num_owners_on_rank = int(num_owners_on_rank)
num_forks_on_rank = int(num_forks_on_rank)
if num_owners_on_rank == num_owners and num_forks_on_rank == num_forks:
return
time.sleep(1)
if time.time() - start > timeout:
raise ValueError(
"Timed out waiting {} sec for {} owners and {} forks on rank, had {} owners and {} forks".format(
timeout,
num_owners,
num_forks,
num_owners_on_rank,
num_forks_on_rank,
)
)
def initialize_pg(init_method, rank: int, world_size: int) -> None:
# This is for tests using `dist.barrier`.
if not dist.is_initialized():
dist.init_process_group(
backend="gloo",
init_method=init_method,
rank=rank,
world_size=world_size,
)
def worker_name(rank: int) -> str:
return "worker{}".format(rank)
def get_function_event(function_events, partial_event_name):
"""
Returns the first event that matches partial_event_name in the provided
function_events. These function_events should be the output of
torch.autograd.profiler.function_events().
Args:
function_events: function_events returned by the profiler.
partial_event_name (str): partial key that the event was profiled with.
"""
event = [event for event in function_events if partial_event_name in event.name][0]
return event
| pytorch-master | torch/testing/_internal/dist_utils.py |
# Torch
from torch.autograd import Variable
from torch.autograd.function import _nested_map
from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
from torch.onnx import OperatorExportTypes
import torch
import torch.cuda
import torch.jit
import torch.jit._logging
import torch.jit.frontend
import torch.jit.quantized
import zipfile
import functools
# Testing utils
from torch.testing import FileCheck
from torch.testing._internal.common_utils import IS_WINDOWS, \
freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS, \
is_iterable_of_tensors
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
# Standard library
from contextlib import contextmanager
from functools import reduce
from io import StringIO
from collections import defaultdict
import importlib.util
import inspect
import io
import math
import os
import pickle
import sys
import tempfile
import textwrap
from importlib.abc import Loader
from typing import Any, Dict, List, Tuple, Union
RUN_CUDA = torch.cuda.is_available()
RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1
RUN_CUDA_HALF = RUN_CUDA
# HIP supports half, no version check necessary
if torch.cuda.is_available() and not torch.version.hip:
CUDA_VERSION = torch._C._cuda_getCompiledVersion()
for d in range(torch.cuda.device_count()):
major = torch.cuda.get_device_capability(d)[0]
if (major < 6):
RUN_CUDA_HALF = False
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def do_input_map(fn, input):
return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input)
def clear_class_registry():
torch._C._jit_clear_class_registry()
torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
torch.jit._state._clear_class_state()
def get_execution_plan(graph_executor_state):
execution_plans = list(graph_executor_state.execution_plans.values())
num_plans = len(execution_plans)
if num_plans != 1:
raise RuntimeError('This test assumes this GraphExecutor should '
'only have one execution plan, got: {}'.format(num_plans))
return execution_plans[0]
class _AssertRaisesRegexWithHighlightContext(object):
"""
A context manager that is useful for checking that error messages highlight
the correct part of the source code.
"""
def __init__(self, test_case, exception, regex, highlight):
self.test_case = test_case
self.exception_type = exception
self.regex = regex
self.highlight = highlight
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
with self.test_case.assertRaisesRegex(self.exception_type, self.regex):
if type:
raise value
if self.highlight:
FileCheck().check_source_highlighted(self.highlight).run(str(value))
return True
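# Illustrative usage sketch (via JitTestCase.assertRaisesRegexWithHighlight
# defined below; the regex and highlighted source text are placeholders):
#   with self.assertRaisesRegexWithHighlight(RuntimeError, "some error regex",
#                                            "offending_source_snippet"):
#       torch.jit.script(some_bad_fn)  # hypothetical function that fails to script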
FUSION_GROUP = "prim::TensorExprGroup"
class JitTestCase(JitCommonTestCase):
_do_cuda_memory_leak_check = True
_restored_warnings = False
class capture_stdout(list):
"""
Replace sys.stdout with a temporary StringIO
"""
def __enter__(self):
self.sys_stdout = sys.stdout
self.stringio = StringIO()
sys.stdout = self.stringio
return self
def __exit__(self, *args):
self.append(str(self.stringio.getvalue()))
del self.stringio
sys.stdout = self.sys_stdout
class capture_stderr(list):
"""
Replace sys.stderr with a temporary StringIO
"""
def __enter__(self):
self.sys_stderr = sys.stderr
self.stringio = StringIO()
sys.stderr = self.stringio
return self
def __exit__(self, *args):
self.append(str(self.stringio.getvalue()))
del self.stringio
sys.stderr = self.sys_stderr
def setHooks(self):
torch._C._jit_set_emit_hooks(self.emitModuleHook, self.emitFunctionHook)
def clearHooks(self):
torch._C._jit_set_emit_hooks(None, None)
def setUp(self):
super().setUp()
# unittest overrides all warning filters and forces all of them to show up
# after we install our own to silence those coming from inside PyTorch.
# This will ensure that our filter still takes precedence.
if not JitTestCase._restored_warnings:
torch.jit.TracerWarning.ignore_lib_warnings()
JitTestCase._restored_warnings = True
self.setHooks()
def tearDown(self):
super().tearDown()
# needs to be cleared because python might be unloaded before
# the callback gets destructed
self.clearHooks()
clear_class_registry()
def assertAllFused(self, graph, except_for=()):
# note this helper collects nodes on 'fast path' only
# i.e. the true blocks of specialized checks
def get_nodes_and_parents_recursively(block, kind, acc):
for node in block.nodes():
if node.kind() == kind:
acc[block].append(node)
elif node.kind() == 'prim::DifferentiableGraph':
get_nodes_and_parents_recursively(node.g('Subgraph'), kind, acc)
elif node.kind() == 'prim::If' and (node.inputs().__next__().node().kind() == 'aten::all' or
node.inputs().__next__().node().kind() == 'prim::TypeCheck' or
node.inputs().__next__().node().kind() == 'prim::RequiresGradCheck'):
get_nodes_and_parents_recursively(node.blocks().__next__(), kind, acc)
else:
for inner_block in node.blocks():
get_nodes_and_parents_recursively(inner_block, kind, acc)
allowed_nodes = {'prim::Constant', FUSION_GROUP, 'prim::BailoutTemplate',
'prim::TupleConstruct', 'prim::If', 'prim::TypeCheck', 'prim::RequiresGradCheck'} | set(except_for)
fusion_groups : Dict[torch._C.Block, List[torch._C.Node]] = defaultdict(list)
get_nodes_and_parents_recursively(graph, FUSION_GROUP, fusion_groups)
self.assertTrue(len(fusion_groups) == 1, 'got {}'.format(graph))
(graph, fusion_nodes) = list(fusion_groups.items())[0]
# the block contains one FUSION_GROUP and the rest of nodes are `allowed_nodes`
self.assertTrue(len(fusion_nodes) == 1, 'got {}'.format(graph))
self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()),
'got {}'.format(graph))
def _isHookExceptionOk(self, e):
se = str(e)
allowed = ("Could not export Python function",
"closures are not exportable")
for a in allowed:
if a in se:
return True
return False
def _compared_saved_loaded(self, m):
def extract_files(buffer):
# crack open the zip format to get at the main module code
archive = zipfile.ZipFile(buffer)
# check that we have no duplicate names
self.assertEqual(len(set(archive.namelist())), len(archive.namelist()))
files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist()))
# unwrap all the code files into strings
code_files_str = filter(lambda x: x.endswith('.py'), files)
code_files_stream = (archive.open(f) for f in code_files_str)
code_files = ("".join([line.decode() for line in file]) for file in code_files_stream)
# unpickle all the debug files
debug_files_str = filter(lambda f: f.endswith('.debug_pkl'), files)
debug_files_stream = (archive.open(f) for f in debug_files_str)
debug_files = (pickle.load(f) for f in debug_files_stream)
return code_files, debug_files
# disable the hook while we parse code, otherwise we will re-enter the hook
with torch._jit_internal._disable_emit_hooks():
try:
# short-circuit if this is an empty function or module
if len(m.code) == 0:
return
if isinstance(m, torch._C.ScriptModule):
if len(m._method_names()) == 0:
return
# save the module to a buffer
buffer = io.BytesIO()
torch.jit.save(m, buffer)
# copy the data in the buffer so we can restore it later. This
# is because py2 and py3 have different semantics with zipfile
# and it's easier to just work with a fresh copy each time.
buffer_copy = buffer.getvalue()
code_files, debug_files = extract_files(buffer)
except RuntimeError as e:
if not self._isHookExceptionOk(e):
raise
else:
return
# import the model again (from the copy we made of the original)
buffer2 = io.BytesIO(buffer_copy)
imported = torch.jit.load(buffer2)
# save it again
saved_module_buffer_2 = io.BytesIO()
torch.jit.save(imported, saved_module_buffer_2)
saved_module_buffer_2.seek(0)
code_files_2, debug_files_2 = extract_files(saved_module_buffer_2)
for a, b in zip(code_files, code_files_2):
self.assertMultiLineEqual(a, b)
if isinstance(m, torch._C.ScriptModule):
self.assertTrue(torch._C._ivalue_tags_match(m, imported._c))
def emitFunctionHook(self, func):
# func has invalid names for export, skip the jitter check
if func.name == "<lambda>" or "aten::" in func.name:
return
self._compared_saved_loaded(func)
def emitModuleHook(self, module):
self._compared_saved_loaded(module)
def getExportImportCopyWithPacking(self, m, also_test_file=True, map_location=None):
buffer = io.BytesIO()
m.apply(lambda s: s._pack() if s._c._has_method('_pack') else None)
torch.jit.save(m, buffer)
m.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
buffer.seek(0)
imported = torch.jit.load(buffer, map_location=map_location)
imported.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
if not also_test_file:
return imported
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.close()
imported.save(f.name)
result = torch.jit.load(f.name, map_location=map_location)
finally:
os.unlink(f.name)
result.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
return result
def assertGraphContains(self, graph, kind, consider_subgraphs=False):
if consider_subgraphs:
strgraph = str(graph)
count = strgraph.count(kind) - strgraph.count('with {}'.format(kind))
self.assertTrue(count > 0)
return
def nodes(block):
out = []
for node in block.nodes():
if node.kind() == kind:
out.append(node)
for block in node.blocks():
out += nodes(block)
return out
out_nodes = nodes(graph)
self.assertTrue(len(out_nodes) > 0)
def assertGraphContainsExactly(self, graph, kind, num_kind_nodes, consider_subgraphs=False):
def perform_assert(graph, kind, actual, expected, consider_subgraphs):
if actual == expected:
return
subgraph = 'including' if consider_subgraphs else 'excluding'
raise AssertionError(
'{}\nError: graph contains {} {} nodes ({} subgraphs) but expected {}'.format(
graph, actual, kind, subgraph, expected))
if consider_subgraphs:
strgraph = str(graph)
count = strgraph.count(kind) - strgraph.count('with {}'.format(kind))
perform_assert(graph, kind, count, num_kind_nodes,
consider_subgraphs)
return
def nodes(block):
out = []
for node in block.nodes():
if node.kind() == kind:
out.append(node)
for block in node.blocks():
out += nodes(block)
return out
out_nodes = nodes(graph)
perform_assert(graph, kind, len(out_nodes), num_kind_nodes,
consider_subgraphs)
def assertExpectedONNXGraph(self, g, *args, **kwargs):
g = torch.onnx._optimize_trace(g, operator_export_type=OperatorExportTypes.ONNX)
self.assertExpectedGraph(g, *args, **kwargs)
def assertExpectedGraph(self, trace, *args, **kwargs):
if isinstance(trace, torch._C.Graph):
graph = trace
else:
graph = trace.graph()
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_dce(graph)
torch._C._jit_pass_lint(graph)
graph = torch._C._jit_pass_canonicalize(graph)
torch._C._jit_pass_lint(graph)
self.assertExpected(str(graph), *args, **kwargs)
def run_pass(self, name, trace):
if isinstance(trace, torch._C.Graph):
graph = trace
set_graph = False
else:
set_graph = True
graph = trace.graph()
torch._C._jit_pass_lint(graph)
result = getattr(torch._C, '_jit_pass_' + name)(graph)
if result is not None and not isinstance(result, bool):
graph = result
torch._C._jit_pass_lint(graph)
if set_graph:
trace.set_graph(graph)
return graph
def get_frame_vars(self, frames_up):
frame = inspect.currentframe()
if not frame:
raise RuntimeError("failed to inspect frame")
i = 0
while i < frames_up + 1:
frame = frame.f_back
if not frame:
raise RuntimeError("failed to get frame")
i += 1
defined_vars: Dict[str, Any] = {}
defined_vars.update(frame.f_locals)
defined_vars.update(frame.f_globals)
return defined_vars
def assertRaisesRegexWithHighlight(self, exception, regex, highlight):
return _AssertRaisesRegexWithHighlightContext(self, exception, regex, highlight)
def checkScriptRaisesRegex(self, script, inputs, exception, regex,
name=None, outputs=None, capture_output=False,
frames_up=1, profiling=ProfilingMode.PROFILING):
"""
Checks that a given function will throw the correct exception,
when executed with normal python, the string frontend, and the
AST frontend. Logic taken from `checkScript` (see comments there
for details)
"""
with enable_profiling_mode_for_profiling_tests():
# Normal Python
with self.assertRaisesRegex(exception, regex):
if isinstance(script, str):
frame = self.get_frame_vars(frames_up)
the_locals: Dict[str, Any] = {}
execWrapper(script, glob=frame, loc=the_locals)
frame.update(the_locals)
python_fn = frame[name]
else:
python_fn = script
python_fn(*inputs)
# String frontend
with self.assertRaisesRegex(exception, regex):
if isinstance(script, str):
cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)
string_frontend = getattr(cu, name)
else:
source = textwrap.dedent(inspect.getsource(script))
cu = torch.jit.CompilationUnit(source, _frames_up=frames_up)
string_frontend = getattr(cu, script.__name__)
string_frontend(*inputs)
# Python AST frontend
if not isinstance(script, str):
with self.assertRaisesRegex(exception, regex):
ge = torch.jit.script(python_fn)
ge(*inputs)
def checkBailouts(self, model, inputs, expected):
state = model.get_debug_state()
plan = get_execution_plan(state)
num_bailouts = plan.code.num_bailouts()
for i in range(0, num_bailouts):
plan.code.request_bailout(i)
bailout_outputs = model(*inputs)
self.assertEqual(bailout_outputs, expected)
def checkScript(self,
script,
inputs,
name='func',
optimize=True,
inputs_requires_grad=False,
capture_output=False,
frames_up=1,
profiling=ProfilingMode.PROFILING,
atol=None,
rtol=None):
"""
Checks that a given script generates the same output as the Python
version using the given inputs.
"""
with torch.jit.optimized_execution(optimize):
with enable_profiling_mode_for_profiling_tests():
extra_profile_runs = any(isinstance(x, torch.Tensor) and x.requires_grad for x in inputs)
if isinstance(script, str):
# Compile the string to a Script function
# with enable_profiling_mode():
cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)
# Execute the Python function so we can run it later and get its
# outputs
frame = self.get_frame_vars(frames_up)
the_locals: Dict[str, Any] = {}
execWrapper(script, glob=frame, loc=the_locals)
frame.update(the_locals)
python_fn = frame[name]
scripted_fn = getattr(cu, name)
else:
# Check the string frontend first
source = textwrap.dedent(inspect.getsource(script))
self.checkScript(
source,
inputs,
script.__name__,
optimize=optimize,
inputs_requires_grad=inputs_requires_grad,
capture_output=capture_output,
profiling=profiling,
frames_up=2)
# Continue checking the Python frontend
scripted_fn = torch.jit.script(script, _frames_up=1)
python_fn = script
if inputs_requires_grad:
recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs)
else:
recording_inputs = inputs
if capture_output:
with self.capture_stdout() as script_stdout:
script_outputs = scripted_fn(*recording_inputs)
with self.capture_stdout() as opt_script_stdout:
opt_script_outputs = scripted_fn(*recording_inputs)
with self.capture_stdout() as _python_stdout:
python_outputs = python_fn(*inputs)
if not IS_WINDOWS:
self.assertExpected(script_stdout[0], subname='stdout')
self.assertEqual(python_outputs, opt_script_outputs, atol=atol, rtol=rtol)
else:
# profiling run
script_outputs = scripted_fn(*recording_inputs)
if inputs_requires_grad or extra_profile_runs:
opt_script_outputs = scripted_fn(*recording_inputs)
# optimized run
opt_script_outputs = scripted_fn(*recording_inputs)
if TEST_BAILOUTS:
self.checkBailouts(scripted_fn, inputs, opt_script_outputs)
python_outputs = python_fn(*inputs)
self.assertEqual(python_outputs, script_outputs, atol=atol, rtol=rtol)
self.assertEqual(script_outputs, opt_script_outputs, atol=atol, rtol=rtol)
return scripted_fn
def checkTrace(self, func, reference_tensors, input_tensors=None,
drop=None, allow_unused=False, verbose=False,
inputs_require_grads=True, check_tolerance=1e-5, export_import=True,
_force_outplace=False):
# TODO: check gradients for parameters, not just inputs
def allSum(vs):
# drop allows us to remove some values from ever being used
# to test unused outputs
if drop is not None:
vs = vs[:-drop]
# we don't want all the grad for all the outputs to be the same
# so we multiply each by a constant
return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None)
if input_tensors is None:
input_tensors = reference_tensors
def flatten_inputs(inputs):
def input_reduce(input, fn, acc):
if isinstance(input, torch.Tensor):
fn(input, acc)
elif isinstance(input, dict):
reduce(lambda acc, key: input_reduce(input[key], fn, acc), input, acc)
else:
reduce(lambda acc, val: input_reduce(val, fn, acc), input, acc)
return acc
return tuple(input_reduce(recording_inputs, lambda t, acc: acc.append(t), []))
nograd_inputs = reference_tensors
if inputs_require_grads:
recording_inputs = do_input_map(lambda t: t.clone().requires_grad_(), reference_tensors)
flattened_recording_inputs = flatten_inputs(recording_inputs)
else:
recording_inputs = reference_tensors
# `check_trace` is set to False because check_trace is run with @no_grad
# Also, `checkTrace` already does all the checks
# against python function
ge = torch.jit.trace(func, input_tensors, check_tolerance=check_tolerance,
_force_outplace=_force_outplace, check_trace=False)
if export_import:
ge = self.getExportImportCopy(ge)
if verbose:
print(ge.graph)
# test no gradients case
outputs = func(*nograd_inputs)
outputs_ge = ge(*nograd_inputs)
self.assertEqual(outputs, outputs_ge)
# test gradients case
outputs = func(*recording_inputs)
if inputs_require_grads:
grads = torch.autograd.grad(allSum(outputs), flattened_recording_inputs,
allow_unused=allow_unused)
outputs_ge = ge(*recording_inputs)
if inputs_require_grads:
grads_ge = torch.autograd.grad(allSum(outputs_ge), flattened_recording_inputs,
allow_unused=allow_unused)
self.assertEqual(outputs, outputs_ge)
if inputs_require_grads:
self.assertEqual(grads, grads_ge)
# test the grad grad case
outputs = func(*recording_inputs)
l1 = allSum(outputs)
if inputs_require_grads:
grads = torch.autograd.grad(l1, flattened_recording_inputs, create_graph=True,
allow_unused=allow_unused)
if inputs_require_grads:
l2 = (allSum(grads) * l1)
grads2 = torch.autograd.grad(l2, flattened_recording_inputs, allow_unused=allow_unused)
if inputs_require_grads:
recording_inputs = do_input_map(lambda t: Variable(t, requires_grad=True), reference_tensors)
flattened_recording_inputs = flatten_inputs(recording_inputs)
outputs_ge = ge(*recording_inputs)
l1_ge = allSum(outputs_ge)
if inputs_require_grads:
grads_ge = torch.autograd.grad(
l1_ge, flattened_recording_inputs, create_graph=True, allow_unused=allow_unused)
if inputs_require_grads:
l2_ge = (allSum(grads_ge) * l1_ge)
grads2_ge = torch.autograd.grad(l2_ge, flattened_recording_inputs, allow_unused=allow_unused)
self.assertEqual(outputs, outputs_ge)
if inputs_require_grads:
self.assertEqual(grads, grads_ge)
for g2, g2_ge in zip(grads2, grads2_ge):
if g2 is None and g2_ge is None:
continue
self.assertEqual(g2, g2_ge, atol=8e-4, rtol=8e-4)
return ge
def checkModule(self, nn_module, args):
"""
Check that a nn.Module's results in Script mode match eager and that it
can be exported
"""
sm = torch.jit.script(nn_module)
with freeze_rng_state():
eager_out = nn_module(*args)
with freeze_rng_state():
script_out = sm(*args)
self.assertEqual(eager_out, script_out)
self.assertExportImportModule(sm, args)
return sm
class NoTracerWarnContextManager(object):
def __enter__(self):
self.prev = torch._C._jit_get_tracer_state_warn()
torch._C._jit_set_tracer_state_warn(False)
def __exit__(self, *args):
torch._C._jit_set_tracer_state_warn(self.prev)
@contextmanager
def inline_everything_mode(should_inline):
old = torch._C._jit_get_inline_everything_mode()
torch._C._jit_set_inline_everything_mode(should_inline)
try:
yield
finally:
torch._C._jit_set_inline_everything_mode(old)
@contextmanager
def set_fusion_group_inlining(inlining):
old = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(inlining)
try:
yield
finally:
torch._C._debug_set_fusion_group_inlining(old)
# note: not re-entrant, use unnested only
@contextmanager
def disable_autodiff_subgraph_inlining(enabled=True):
torch._C._debug_set_autodiff_subgraph_inlining(not enabled)
try:
yield
finally:
torch._C._debug_set_autodiff_subgraph_inlining(True)
def _inline_everything(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
with inline_everything_mode(True):
fn(*args, **kwargs)
return wrapper
# this exists for forward compatibility reasons temporarily.
# TODO(suo) remove
def _tmp_donotuse_dont_inline_everything(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
with inline_everything_mode(False):
fn(*args, **kwargs)
return wrapper
# make it easy to quickly define/trace a function for these tests
def _trace(*args, **kwargs):
def wrapper(func):
return torch.jit.trace(func, args, **kwargs)
return wrapper
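# Illustrative usage sketch of `_trace` (the decorated function is an example):
#   @_trace(torch.rand(3, 4))
#   def traced_double(x):
#       return x * 2
# `traced_double` is the result of torch.jit.trace run on the example input.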
def enable_cpu_fuser(fn):
def wrapper(*args, **kwargs):
torch._C._jit_override_can_fuse_on_cpu_legacy(True)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_set_te_must_use_llvm_cpu(False)
try:
fn(*args, **kwargs)
finally:
torch._C._jit_override_can_fuse_on_cpu_legacy(False)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_set_te_must_use_llvm_cpu(True)
return wrapper
def enable_cpu_fuser_if(cond):
if cond:
return enable_cpu_fuser
else:
def noop_fuser(fn):
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
return noop_fuser
def get_forward(c):
return c._get_method('forward')
def get_forward_graph(c):
return c._get_method('forward').graph
def get_module_method(m, module, method):
return m._c.getattr(module)._get_method(method)
def attrs_with_prefix(module, prefix):
return [x for x, _ in module._modules._c.items()
if x.startswith(prefix)]
def warmup_backward(f, *args):
profiling_count = 3
results = []
for i in range(profiling_count):
if len(args) > 0:
r = torch.autograd.grad(f, *args)
results.append(r)
else:
f.backward(retain_graph=True)
return results
# TODO: Remove me once https://bugs.python.org/issue42666 is resolved
def make_global(*args):
for arg in args:
setattr(sys.modules[arg.__module__], arg.__name__, arg)
# Helper function to eval Python3 code without causing a syntax error for
# this file under py2
def _get_py3_code(code, fn_name):
with tempfile.TemporaryDirectory() as tmp_dir:
script_path = os.path.join(tmp_dir, 'script.py')
with open(script_path, 'w') as f:
f.write(code)
spec = importlib.util.spec_from_file_location(fn_name, script_path)
module = importlib.util.module_from_spec(spec)
loader = spec.loader
assert isinstance(loader, Loader)  # Assert type to meet MyPy requirement
loader.exec_module(module)
fn = getattr(module, fn_name)
return fn
class TensorExprTestOptions():
def __init__(self):
self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
self.old_profiling_mode = torch._C._get_graph_executor_optimize(True)
self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
torch._C._jit_set_texpr_fuser_enabled(True)
self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()
torch._C._jit_set_te_must_use_llvm_cpu(False)
self.old_nvfuser = torch._C._jit_set_nvfuser_enabled(False)
def restore(self):
torch._C._jit_set_profiling_executor(self.old_profiling_executor)
torch._C._get_graph_executor_optimize(self.old_profiling_mode)
torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)
torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state)
torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state)
torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)
torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)
torch._C._jit_set_nvfuser_enabled(self.old_nvfuser)
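# Illustrative usage pattern for TensorExprTestOptions (typically paired in a
# test's setUp/tearDown):
#   def setUp(self):
#       super().setUp()
#       self.tensorexpr_options = TensorExprTestOptions()
#
#   def tearDown(self):
#       self.tensorexpr_options.restore()
#       super().tearDown()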
def clone_inputs(args):
inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = []
for arg in args:
if isinstance(arg, torch.Tensor):
inputs.append(arg.detach().clone())
elif is_iterable_of_tensors(arg):
inputs.append([t.detach().clone() for t in arg])
else:
inputs.append(arg)
return inputs
def get_traced_sample_variant_pairs(device, dtype, op):
# tuples of (variant, sample)
outputs: List[Tuple[Any, Any]] = []
samples = op.sample_inputs(device, dtype)
# Acquires variants to test
func = op.get_op()
method = op.get_method()
variants = {
# TODO: inplace tests currently fail, fix and add inplace variant
'function': func, 'method': method,
}
# TODO: find better way to standardize on op registration itself..
has_fake_function = op.name in ["resize_", 'resize_as_']
if has_fake_function:
variants = {'method': getattr(torch.Tensor, op.name)}
# In eager mode, these ops can take (Tensor, bool) args; but in
# JIT they can only take (Tensor, Scalar), and bool is not a
# scalar in the JIT type system. So to test these in JIT, the bool
# is converted to an int for the test.
ops_with_unsupported_bool_args = [
{
"name": "div_floor_rounding",
"arg_idx": [0],
},
{
"name": "div_no_rounding_mode",
"arg_idx": [0],
},
{
"name": "div_trunc_rounding",
"arg_idx": [0],
},
{
"name": "index_fill",
"arg_idx": [2],
},
{
"name": "full_like",
"arg_idx": [0],
},
{
"name": "mul",
"arg_idx": [0],
},
{
"name": "new_full",
"arg_idx": [1],
},
]
# doesn't support tracing
if has_fake_function:
return outputs
for sample in samples:
for func_type, variant in variants.items():
if variant is None:
continue
if is_lambda(variant):
continue
matching_ops = filter(lambda x: op.formatted_name == x["name"], ops_with_unsupported_bool_args)
for op_data in matching_ops:
for idx in op_data["arg_idx"]:
args = list(sample.args)
if len(sample.args) > idx and isinstance(sample.args[idx], bool):
args[idx] = int(args[idx])
sample.args = tuple(args)
outputs.append((variant, sample))
return outputs
# types.LambdaType gave false positives
def is_lambda(lamb):
LAMBDA = lambda: 0 # noqa: E731
return isinstance(lamb, type(LAMBDA)) and lamb.__name__ == LAMBDA.__name__
| pytorch-master | torch/testing/_internal/jit_utils.py |
| pytorch-master | torch/testing/_internal/generated/__init__.py |
| pytorch-master | torch/testing/_internal/opinfo/__init__.py |
from dataclasses import dataclass, asdict
import collections.abc
import operator
from typing import Any, Callable, List, Optional, Tuple, Iterable
from enum import Enum
import unittest
import math
from functools import partial
from itertools import product
import torch
from torch.testing import make_tensor
from torch.testing._internal.opinfo import utils
from torchgen.utils import dataclass_repr
from torch.testing._internal.common_utils import (
is_iterable_of_tensors,
noncontiguous_like,
TEST_WITH_ROCM,
torch_to_numpy_dtype_dict,
)
from torch.testing._internal.common_dtype import (
_dispatch_dtypes,
floating_and_complex_types_and,
floating_and_complex_types,
floating_types,
)
from torch.testing._internal.common_device_type import (
skipCPUIfNoFFT,
toleranceOverride,
tol,
)
# Reasonable testing sizes for dimensions
L = 20
M = 10
S = 5
XS = 3
# Unique value to distinguish default from anything else
_NOTHING = object()
# Extension of getattr to support qualified names
# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
try:
for path in name.split('.'):
obj = getattr(obj, path)
return obj
except AttributeError:
if default is not _NOTHING:
return default
else:
raise
class DecorateInfo(object):
"""Describes which test, or type of tests, should be wrapped in the given
decorators when testing an operator. Any test that matches all provided
arguments will be decorated. The decorators will only be applied if the
active_if argument is True."""
__slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']
def __init__(self, decorators, cls_name=None, test_name=None, *,
device_type=None, dtypes=None, active_if=True):
self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]
self.cls_name = cls_name
self.test_name = test_name
self.device_type = device_type
self.dtypes = dtypes
self.active_if = active_if
# Validate dtypes
if self.dtypes is not None:
for dtype in self.dtypes:
assert isinstance(dtype, torch.dtype)
def is_active(self, cls_name, test_name, device_type, dtype):
return (
self.active_if and
(self.cls_name is None or self.cls_name == cls_name) and
(self.test_name is None or self.test_name == test_name) and
(self.device_type is None or self.device_type == device_type) and
(self.dtypes is None or dtype in self.dtypes)
)
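# Illustrative usage sketch of DecorateInfo (the class/test names are placeholders):
#   DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_some_case',
#                device_type='cpu', dtypes=(torch.float32,))
# applies unittest.expectedFailure only when TestCommon.test_some_case runs
# on CPU with float32; all other tests of the operator are left untouched.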
# FIXME
# Note: historically the 'input' kwarg had to be a Tensor or TensorList, but we are trying
# to support scalar inputs, too. Some tests still depend on 'input' being a Tensor
# or TensorList, however.
class SampleInput(object):
"""Represents sample inputs to a function."""
__slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']
def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x, broadcasts_input=False, name=""):
# input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]).
# This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
self.input = input
self.args = args
assert isinstance(self.args, tuple)
self.kwargs = kwargs if kwargs is not None else {}
assert isinstance(self.kwargs, dict)
self.output_process_fn_grad = output_process_fn_grad
self.name = name
# Specifies if `self.input` is broadcasted or not,
# given that the operator supports broadcasting.
# This field is used to verify the behavior for inplace variant.
#
# If a SampleInput is marked with `broadcasts_input=True`,
# it is verified that we get a `RuntimeError` with this sample and
# the inplace variant. Inplace grad{grad} tests are also skipped
# for such inputs (as they would error out otherwise).
self.broadcasts_input = broadcasts_input
def _repr_helper(self, formatter):
# Helper function to return the details of the SampleInput as `str`
# It consolidates all the fields of SampleInput and allows
# formatting the fields like `input`, `args`, etc. with a `formatter`
# callable to customize the representation.
# Look at `summary` method for example.
arguments = [
f'input={formatter(self.input)}',
f'args={formatter(self.args)}',
f'kwargs={formatter(self.kwargs)}',
f'output_process_fn_grad={self.output_process_fn_grad}',
f'broadcasts_input={self.broadcasts_input}',
f'name={repr(self.name)}']
return f'SampleInput({", ".join(a for a in arguments if a is not None)})'
def __repr__(self):
return self._repr_helper(lambda x: x)
def summary(self):
# Returns the SampleInput details in a more
# friendly format.
# It formats `Tensor` and `TensorList`
# in a more condensed representation.
def formatter(arg):
# Format any instance of `Tensor` (standalone, in list, or in dict)
# by Tensor[TensorShape]
# Eg. Tensor with shape (3, 4) is formatted as Tensor[3, 4]
if isinstance(arg, torch.Tensor):
shape = str(tuple(arg.shape)).replace('(', '').replace(')', '')
return f"Tensor[{shape}]"
elif isinstance(arg, dict):
return {k: formatter(v) for k, v in arg.items()}
elif is_iterable_of_tensors(arg):
return "TensorList[" + ", ".join(map(formatter, arg)) + "]"
elif isinstance(arg, (list, tuple)): # Handle list, tuple
return "(" + ",".join(map(formatter, arg)) + ")"
return repr(arg)
return self._repr_helper(formatter)
# Applies the transform f(t) -> t to each tensor and dtype in the SampleInput
def transform(self, f):
def tt(t):
def _tt(t):
with torch.no_grad():
return f(t)
if isinstance(t, torch.Tensor):
return _tt(t)
elif isinstance(t, torch.dtype):
return _tt(t)
elif isinstance(t, list):
return list(map(tt, t))
elif isinstance(t, tuple):
return tuple(map(tt, t))
elif isinstance(t, dict):
return {k: tt(v) for k, v in t.items()}
else:
return t
sample_tt_input, tt_args, tt_kwargs = tt(self.input), tt(self.args), tt(self.kwargs)
# Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid!
return SampleInput(
sample_tt_input,
args=tt_args,
kwargs=tt_kwargs,
output_process_fn_grad=self.output_process_fn_grad,
broadcasts_input=self.broadcasts_input,
name=self.name + "_transformed")
# Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)
# Converts tensors to ndarrays by calling .detach().cpu().numpy() on them
# Converts dtypes by remapping them using torch_to_numpy_dtype_dict
def numpy(self):
def to_numpy(t):
if isinstance(t, torch.Tensor):
if t.dtype is torch.bfloat16:
return t.detach().cpu().to(torch.float32).numpy()
if t.dtype is torch.chalf:
return t.detach().cpu().to(torch.cfloat).numpy()
return t.detach().cpu().numpy()
elif isinstance(t, torch.dtype):
return torch_to_numpy_dtype_dict[t]
return t
return self.transform(to_numpy)
def noncontiguous(self):
def to_noncontiguous(t):
if isinstance(t, torch.Tensor):
return noncontiguous_like(t)
elif isinstance(t, torch.dtype):
return t
return t
return self.transform(to_noncontiguous)
class ErrorInput(object):
"""
A SampleInput that will cause the operation to throw an error, plus information
about the resulting error.
"""
__slots__ = ['sample_input', 'error_type', 'error_regex']
def __init__(self, sample_input, *, error_type=RuntimeError, error_regex):
self.sample_input = sample_input
self.error_type = error_type
self.error_regex = error_regex
class AliasInfo(object):
"""Class holds alias information. For example, torch.abs ->
torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
"""
def __init__(self, alias_name):
self.name = alias_name
self.op = _getattr_qual(torch, alias_name)
self.method_variant = getattr(torch.Tensor, alias_name, None)
self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)
def __call__(self, *args, **kwargs):
return self.op(*args, **kwargs)
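# Example (illustrative, shown as a comment rather than executed): for the
# alias relationship described above, AliasInfo("absolute") exposes
# torch.absolute as .op, torch.Tensor.absolute as .method_variant, and
# torch.Tensor.absolute_ as .inplace_variant, so AliasInfo("absolute")(t)
# is equivalent to calling torch.absolute(t).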
# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# The majority of this note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
# parametrize a test template using OpInfos.
# See also: PyTorch's GitHub wiki on running and writing tests
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
# metadata is used to generate tests that validate properties of the operator,
# like if it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do three things:
#
# 1) to allow systematic testing over all PyTorch's operators
# 2) to simplify operator testing by autogenerating many tests
# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test
# against every PyTorch operator
#
# All these goals are still a work in progress. Not every operator has an
# OpInfo, and some operator tests that could be automatically generated
# still have to be written manually.
#
# It's helpful to understand that OpInfos are both about test simplification and
# modularity. PyTorch is a complicated framework with many interrelated systems,
# too many for any one person to keep track of. An OpInfo can be thought of as the
# interface between an operator implementer and those other systems. Instead of
# requiring the implementer of torch.foo to understand how to test its forward
# mode AD or NNC support, that testing is typically handled automatically just
# by defining an OpInfo.
#
# It's often surprising to OpInfo writers that just implementing an OpInfo
# typically can't verify an operator is actually implemented correctly:
#
# "If an OpInfo doesn't validate my op works as expected, what's the point
# of it?"
#
# But that is the point. OpInfos are intended to let you focus on testing
# the operator logic you're familiar with instead of having to write tests for
# how the operator interacts with each of PyTorch's many systems.
#
# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES
# validate your op works as expected, but that's only in special
# cases. See below for details.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
# like which dtypes it supports on the CPU and whether it has any aliases.
# These properties can be divided into three categories:
#
# 1) Metadata describing the operator, like the operator's name and if it
# "supports" the out kwarg.
# 2) Test directives, like "skips" that tell the test suite to skip some
# tests.
# 3) A "sample inputs" function that generates valid inputs for the operator.
#
# OpInfo attributes are described in more detail below.
#
# THE SAMPLE INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The "sample inputs" function merits special elaboration. This function is
# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
# as a black box. There's no structure for the test to understand or exploit.
# Without "sample inputs" it wouldn't even know how to call the OpInfo's
# operator. The sample input function saves the day by providing different
# "SampleInputs" that can be used to call the operator. A sample input
# function should have the following signature:
#
# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#
# And should return an iterable of SampleInputs (see the class description
# above). Each SampleInput defines an "input", "args", "kwargs", an
# "output_process_fn_grad" function, the "broadcasts_input" bool and a
# "name".
#
# All the "sample_inputs" functions are invoked within a `torch.no_grad()`
# environment for efficiency and correctness. As such remember to set the
# "requires_grad" flag on the inputs **after** performing any transformations
# on them.
#
# The "input" is the first argument to the operator, or the tensor that
# the method or inplace variants of the operator should be called on, and
# should be on the requested device, of the requested dtype, and its
# requires_grad attribute should be set to the requires_grad argument.
#
# "args" should contain positional arguments, and "kwargs" keyword arguments.
#
# "output_process_fn_grad" has an interesting name. It's a function that maps
# the operator's output (when given the input, args, and kwargs) to the
# portion of the output to gradcheck. For example, consider an operator
# like torch.linalg.slogdet
# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).
# This operator returns a tuple of two tensors, but the first tensor
# cannot be backwarded through. Its "output_process_fn_grad" filters
# this output tuple to just the second argument, which we can call backward
# on. Functions that produce a single tensor can ignore this argument.
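#
# For example (illustrative, not the actual OpInfo entry), a SampleInput for
# torch.linalg.slogdet might set
#
#   output_process_fn_grad=lambda output: output[1]
#
# so that gradcheck only sees the differentiable logabsdet tensor.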
#
# "broadcasts_input" is a bool indicated if the SampleInput causes the operator
# to broadcast the "input" argument. This is important for tests to understand
# because inplace variants of operations throw a runtime error if they
# would broadcast their input arguments, so tests that work with inplace
# variants filter SampleInputs that broadcast their input.
#
# "name" is a string that's just used for debugging. It appears when printing
# the SampleInput.
#
# Sample inputs are designed to be used with many tests, some
# that are very time consuming, so they should be a small
# set with small tensors. An elaborated set of sample inputs
# can be specified using the "reference_inputs_func" attribute.
# The "reference inputs" for an operation are an extended
# set of sample inputs that can more exhaustively test an
# operator. They are used by only a few tests that are careful
# not to take too long to run. Adding reference inputs
# is highly encouraged!
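#
# For illustration only (the op name and shapes below are made up), a minimal
# sample inputs function might look like:
#
#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#       make_arg = partial(make_tensor, device=device, dtype=dtype,
#                          requires_grad=requires_grad)
#       yield SampleInput(make_arg((S,)))
#       yield SampleInput(make_arg((S, S)), args=(make_arg((S, S)),),
#                         name='two_tensors')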
#
# THE (OPTIONAL) ERROR INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# OpInfos may optionally specify "error inputs" through an error function. If
# specified, test_errors in test_ops.py will call the op with these inputs
# and validate that the desired error is thrown.
#
# Error inputs automate a common testing pattern where multiple inputs are
# passed to an operation and the errors they throw are reviewed. Tests
# written in this style should be ported to the new OpInfo pattern.
#
# Error inputs are specified using the ErrorInput class, which contains
# a SampleInput (see above) and data about the expected error.
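#
# For illustration only (the op and the error below are made up), an error
# inputs function might look like:
#
#   def error_inputs_foo(op_info, device, **kwargs):
#       si = SampleInput(make_tensor((S,), device=device, dtype=torch.float32),
#                        args=(-1,))
#       yield ErrorInput(si, error_type=RuntimeError,
#                        error_regex="expected a nonnegative value")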
#
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
# in test_ops.py, but some system-specific tests are defined in those
# systems' test files, and subclass-specific tests are defined in the test
# file that corresponds to that subclass (see below).
# Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
# test_ops.py:
#
# - that its supported dtypes are specified correctly
# - that the operation produces the same results when called with noncontiguous inputs
# - that it supports the out= argument properly (if it allows out=),
# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
# - that it works with the conjugate view bit properly
# - that its function, method, and inplace variants perform the same operation
# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
# do the same thing).
# - that its inplace variant preserves the input's storage
# - that its gradient formula is implemented correctly, and that it supports
# gradgrad and complex grad and gradgrad and forward mode AD properly for
# the op's function and inplace variants (method variants are skipped
# to reduce test time).
# - that the operation performs the same operation when traced or scripted
# using the jit
# - that the operation is autodifferentiated by the jit as expected
# - that the operator's aliases, if any, perform the same operation and that
# the jit understands the alias
# - that the operator throws the correct errors (if error_inputs is defined)
# - that the operator produces the same results as a NumPy reference (if ref is defined)
# - that the operator produces the same results as a NumPy reference on an extended
# set of "reference inputs" (if both ref and reference_inputs_func are defined)
# (NOTE: elementwise unary and elementwise binary OpInfos do this even if only
# ref is defined, because they effectively autogenerate reference inputs)
# - that the operator works on different CUDA devices
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
# and test_fx.py. These tests validate that operators work with NNC and FX
# as expected.
#
# For performance, some of the above tests may only run on the first
# SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
# define additional tests.
#
# Critically, as mentioned above, what's not necessarily tested is that the operator
# works as expected. When implementing an OpInfo an engineer must still
# typically write one or more tests validating the operator's behavior.
# The exception to this is if reference testing is sufficient, or if
# the operation belongs to an OpInfo subclass that has more exhaustive
# operator testing. Elementwise unary and elementwise binary operators,
# in particular, usually don't require additional testing beyond
# writing an OpInfo.
#
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
# subclasses. For example, the UnaryUfuncInfo subclass is used for
# unary elementwise operations. These operations have a common structure
# that test_unary_ufuncs.py exploits with additional automated testing.
# The automated testing in test_unary_ufuncs.py is so thorough, comparing
# the operator to a NumPy reference function on a plethora of values, that
# just implementing an OpInfo for a unary elementwise operation is often
# sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
# very unique class of operations. These OpInfos aren't included in the
# op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
# when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to any of the following namespaces:
# - torch
# - torch.fft
# - torch.linalg,
# - torch.special
# - torch.nn.functional
# then you should typically add an OpInfo for it.
#
# As mentioned a couple times above, implementing an OpInfo is not
# usually sufficient testing (unless the operator is a unary or binary elementwise
# operator). The OpInfo will only test the properties described in the
# "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is
# implemented correctly.
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
# be consumed by a variety of systems it can be hard to understand how to
# deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
# function must be defined, and the operator's dtypes must be specified.
# Once that's done you should run the operator's tests in test_ops.py
# (these can be filtered using the "-k" argument in pytest). Tests that
# fail should provide an error message that describes what to change about
# your OpInfo. You don't need to worry about changing an OpInfo's default
# values unless a test yells at you.
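#
# For example (illustrative op name), to run only the OpInfo-generated tests
# in test_ops.py for torch.foo you might use:
#
#   python -m pytest test/test_ops.py -k foo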
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
# your test provides a clear error message describing what to do when it
# fails. You should not assume the OpInfo implementer is familiar with your
# system.
#
# If you see a confusing error message while developing an OpInfo then please
# file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating,
# but it's probably necessary as long as OpInfos don't require
# learning about all the systems that consume them. One thing that can help
# is the get_supported_dtypes() function defined in utils.py. This
# function can be used to programmatically specify the dtypes an operator
# supports, and is especially useful if writing an OpInfo on a machine
# without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve and cover
# the great majority of PyTorch's (public) operators.
#
# Classes and methods for the operator database
@dataclass
class OpInfo(object):
"""Operator information and helper functions for acquiring it."""
# the string name of the function
name: str
# An optional reference function that accepts ndarrays (AKA "NumPy arrays").
# If given, the op will be compared with its reference on each of its sample inputs.
ref: Optional[Callable] = None
# the following metadata describes the operator, its variants, and its aliases, if any
# iterable of aliases, e.g. ("absolute",) for torch.abs
aliases: Iterable = None
# additional string to include in the test name
# this is useful when an op needs multiple OpInfos,
# like divide does, often because it's really several
# different ops behind the scenes
variant_test_name: str = ''
# the function variant of the operation, populated as torch.<name> if None
op: Callable = None
# allows the method variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated method
# - if a Callable, then that callable should be the method associated with this operation
method_variant: Callable = _NOTHING
# allows the inplace variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace variant
# - if a Callable, then that callable should be the inplace variant associated with this operation
inplace_variant: Callable = _NOTHING
# allows the operator variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated operator
# - if a Callable, then that callable should be the operator associated with this operation
operator_variant: Callable = _NOTHING
# allows the inplace operator variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace operator
# - if a Callable, then that callable should be the inplace operator associated with this operation
inplace_operator_variant: Callable = _NOTHING
# the following metadata are test directives for skipping or modifying tests
# information about which tests to skip
skips: Tuple = tuple()
# decorators to apply to generated tests
decorators: Tuple = tuple()
# the following are pointers to functions to generate certain classes of inputs
# function to generate sample inputs with strided layouts
sample_inputs_func: Callable = None
    # function to generate a more thorough set of sample inputs with strided layouts
reference_inputs_func: Callable = None
# function to generate inputs that will throw errors
error_inputs_func: Callable = None
# function to generate sample inputs with sparse coo layouts
sample_inputs_sparse_coo_func: Callable = None
# function to generate sample inputs with sparse csr layouts
sample_inputs_sparse_csr_func: Callable = None
# function to generate sample inputs with sparse csc layouts
sample_inputs_sparse_csc_func: Callable = None
# function to generate sample inputs with sparse bsr layouts
sample_inputs_sparse_bsr_func: Callable = None
# function to generate sample inputs with sparse bsc layouts
sample_inputs_sparse_bsc_func: Callable = None
# the following metadata relates to dtype support and is tested for correctness in test_ops.py
# dtypes this function works with on the CPU,
# inherited by other device types that don't specify their own dtypes
dtypes: _dispatch_dtypes = None
# the following dtypesIf... options override the dtypes value on their respective device types
# dtypes this function is expected to work with on CUDA
dtypesIfCUDA: _dispatch_dtypes = None
# dtypes this function is expected to work with on ROCM
dtypesIfROCM: _dispatch_dtypes = None
# backward dtypes this function is expected to work with
backward_dtypes: _dispatch_dtypes = None
# backward dtypes this function is expected to work with on CUDA
backward_dtypesIfCUDA: _dispatch_dtypes = None
# backward dtypes this function is expected to work with on ROCM
backward_dtypesIfROCM: _dispatch_dtypes = None
    # the following metadata describes the operator's out= support
# whether the op supports the out kwarg
    # defaults to True; if the op does not allow the out kwarg or
    # supports it incorrectly, then test_out in test_ops.py should fail
supports_out: bool = True
# the following metadata relates to autograd support
# whether the operation supports backward mode AD
# if true, gradient correctness is tested in test_ops.py
# using the op's sample inputs
supports_autograd: bool = True
# whether the op supports second order gradients
# if true, gradgrad correctness is tested in test_ops.py
    # defaults to supports_autograd's value
# TODO: rename this to supports_bwgrad_bwgrad to be consistent with below
supports_gradgrad: bool = None
    # whether the op supports second order gradients via
# forward-over-reverse. If True, forward-over-reverse gradgrad correctness
# is tested. If False, test that forward grad is not implemented.
# Defaults to False.
supports_fwgrad_bwgrad: bool = False
# whether the operation supports inplace autograd
# if true, tested in test_ops.py
# defaults to supports_autograd's value
supports_inplace_autograd: bool = None
# Whether the operation support forward mode AD
# If the value is True, we check that the gradients are correct
# If the value is False, we test that forward grad is not implemented
supports_forward_ad: bool = False
# wrapper function for gradcheck
gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs)
# whether to check batched grad when doing gradcheck
    # defaults to supports_autograd's value
check_batched_grad: bool = None
# whether to check batched grad grad when doing gradgradcheck
    # defaults to supports_gradgrad's value
check_batched_gradgrad: bool = None
# whether to check batched forward grad when doing gradcheck
# defaults to the value of `supports_forward_ad`
check_batched_forward_grad: bool = None
    # whether to check batched forward grad for the inplace variant when doing gradcheck
# defaults to the value of `check_batched_forward_grad`
check_inplace_batched_forward_grad: bool = None
# tolerance for nondeterminism while performing gradcheck
gradcheck_nondet_tol: float = 0.0
    # Whether to use the fast implementation for gradcheck/gradgradcheck.
# When set to None, defers to the default value provided by the wrapper
# function around gradcheck (testing._internal.common_utils.gradcheck)
gradcheck_fast_mode: bool = None
# the following metadata relates to JIT support and is tested for correctness in test_ops.py
# name of the corresponding aten:: operator
aten_name: str = None
# if this is a composite implicit autograd op, the decomposed op
decomp_aten_name: Optional[str] = None
# name of the corresponding aten:: operator for backwards
aten_backward_name: Optional[str] = None
    # if an op's aten::node is expected to be symbolically autodiffed
assert_autodiffed: bool = False
# a list of strings with node names that are expected to be in a
# DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
# default is populated to be ['aten::(name of Python operator)']
autodiff_nonfusible_nodes: List[str] = None
# a list of strings with node names that are expected to be in FusionGroups
# inside of DifferentiableGraphs when this operation is autodiffed.
# Ex: ['aten::add', 'aten::mm'], defaults to an empty list
# Note: currently no ops use fusible nodes
autodiff_fusible_nodes: List[str] = None
# the following metadata relates to sparse support and is used in test_sparse.py
# whether the op supports sparse inputs
supports_sparse: bool = False
    # whether the op supports scripting (if not, only tracing tests are run)
supports_scripting: bool = True
# if the operator can be traced
supports_tracing: bool = True
# the following metadata relates to sparse csr support and is used in test_sparse_csr.py
# whether the op supports sparse csr inputs
supports_sparse_csr: bool = False
# whether the op supports sparse csc inputs
supports_sparse_csc: bool = False
# whether the op supports sparse bsr inputs
supports_sparse_bsr: bool = False
# whether the op supports sparse bsc inputs
supports_sparse_bsc: bool = False
# the following metadata relates to complex support and is checked in test_ops.py
test_conjugated_samples: bool = True
test_neg_view: bool = True
# assert that jit shape analysis fully propagates shape
assert_jit_shape_analysis: bool = False
# the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py
supports_expanded_weight: bool = False
is_factory_function: bool = False
def __post_init__(self):
self._original_opinfo_args = asdict(self).copy()
assert self.dtypes is not None, "OpInfo for {0} has no dtypes!".format(self.name)
dtypes_args = (self.dtypes, self.dtypesIfCUDA, self.dtypesIfROCM)
# Validates the dtypes are generated from the dispatch-related functions
for dtype_list in dtypes_args:
assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))
if self.aten_name is None:
self.aten_name = self.name
# Attribute to verify dynamic_dtypes are used.
self.dynamic_dtypes = any(map(lambda dtypes: isinstance(
dtypes, utils._dynamic_dispatch_dtypes), dtypes_args))
if self.dynamic_dtypes:
            # Make sure `dtypesIfCUDA` is dynamic, if dynamic dispatch is used for CPU
# This is because, below we set dtypesIfCUDA to dtypes if they are None.
assert isinstance(self.dtypesIfCUDA, utils._dynamic_dispatch_dtypes), \
(f"To use dynamic dypes for operator {self.name}, "
"acquire the dtypes dynamically for argument `dtypesIfCUDA`."
"This is to ensure that CUDA dtypes are acquired correctly as they"
"differ from CPU dtypes occasionally")
self.dtypes = set(self.dtypes)
# NOTE: backward dtypes must be acquired before forward dtypes
# since they fallback to explicit (not implicit!) specifications of
# forward dtypes
self.backward_dtypesIfROCM = set(self.backward_dtypesIfROCM) if self.backward_dtypesIfROCM is not None else (
self.backward_dtypesIfCUDA if self.backward_dtypesIfCUDA is not None
else self.backward_dtypes if self.backward_dtypes is not None
else self.dtypesIfROCM if self.dtypesIfROCM is not None
else self.dtypesIfCUDA if self.dtypesIfCUDA is not None
else self.dtypes)
self.backward_dtypesIfCUDA = set(self.backward_dtypesIfCUDA) if self.backward_dtypesIfCUDA is not None else (
self.backward_dtypes if self.backward_dtypes is not None
else self.dtypesIfCUDA if self.dtypesIfCUDA is not None
else self.dtypes)
self.backward_dtypes = set(self.backward_dtypes) if self.backward_dtypes is not None else self.dtypes
self.dtypesIfCUDA = set(self.dtypesIfCUDA) if self.dtypesIfCUDA is not None else self.dtypes
self.dtypesIfROCM = set(self.dtypesIfROCM) if self.dtypesIfROCM is not None else self.dtypesIfCUDA
# NOTE: if the op is unspecified it is assumed to be under the torch namespace
if not self.op:
self.op = _getattr_qual(torch, self.name)
if self.method_variant is _NOTHING:
self.method_variant = getattr(torch.Tensor, self.name, None)
# attributes like real, imag are not callable
if not callable(self.method_variant):
self.method_variant = None
if self.inplace_variant is _NOTHING:
inplace_name = self.name + "_"
self.inplace_variant = getattr(torch.Tensor, inplace_name, None)
if self.operator_variant is _NOTHING:
self.operator_variant = getattr(operator, self.name, None)
if self.inplace_operator_variant is _NOTHING:
# Note: operator.i<op> will use operator.<op> and assign the result to the lhs when no
# __i<op>__ method is found. This results in the appearance of an inplace operator variant which
# does not have the correct inplace behavior. To avoid this, we guard automatic detection of the inplace
# operator with a check that an inplace variant exists.
if self.inplace_variant is not None:
inplace_operator_name = "i" + self.name
self.inplace_operator_variant = getattr(operator, inplace_operator_name, None)
else:
self.inplace_operator_variant = None
self.decorators = (*self.decorators, *self.skips)
        # We run the sampling functions without tracking the gradients of the creation of inputs
self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func)
self.sample_inputs_sparse_coo_func = torch.no_grad()(self.sample_inputs_sparse_coo_func)
self.sample_inputs_sparse_csr_func = torch.no_grad()(self.sample_inputs_sparse_csr_func)
self.sample_inputs_sparse_csc_func = torch.no_grad()(self.sample_inputs_sparse_csc_func)
self.sample_inputs_sparse_bsr_func = torch.no_grad()(self.sample_inputs_sparse_bsr_func)
self.sample_inputs_sparse_bsc_func = torch.no_grad()(self.sample_inputs_sparse_bsc_func)
if self.reference_inputs_func is not None:
self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func)
if not self.autodiff_fusible_nodes:
self.autodiff_fusible_nodes = []
if self.autodiff_nonfusible_nodes is None:
self.autodiff_nonfusible_nodes = ['aten::' + self.name]
# Autograd support
# Autograd flags that depend on backward AD only
# - If setting has been explicitly set, raise error if inconsistent
if self.supports_gradgrad is None:
self.supports_gradgrad = self.supports_autograd
else:
assert not (self.supports_gradgrad and not self.supports_autograd), (
"supports_gradgrad refines the part of autograd is supported, so it should "
"not be set if supports_autograd is False")
if self.check_batched_grad is None:
self.check_batched_grad = self.supports_autograd or self.supports_forward_ad
else:
assert not (self.check_batched_grad and not (self.supports_autograd or self.supports_forward_ad)), (
"check_batched_grad refines the part of autograd that will be checked (by gradcheck), so "
"it should not be set if supports_autograd is False")
if self.check_batched_gradgrad is None:
self.check_batched_gradgrad = self.supports_gradgrad
else:
assert not (self.check_batched_gradgrad and not self.supports_gradgrad), (
"check_batched_gradgrad refines the part of autograd that will be checked (by "
"gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd "
"is False.")
if self.check_batched_forward_grad is None:
self.check_batched_forward_grad = self.supports_forward_ad
else:
assert not (self.check_batched_forward_grad and not self.supports_forward_ad), (
"check_batched_forward_grad should only be used when supports_forward_ad "
"is True. It is used to disable the test in the specific cases "
"where the op supports forward ad but fails to compute "
"batched forward grad.")
if self.check_inplace_batched_forward_grad is None:
self.check_inplace_batched_forward_grad = self.check_batched_forward_grad
else:
assert not (self.check_inplace_batched_forward_grad and not self.check_batched_forward_grad), (
"check_batched_forward_grad should only be used when check_batched_forward_grad "
"is True. It is used to disable the test in the specific cases "
"where the op supports batched forward grad but fails to compute batched forward "
"grad for the inplace variant of the op.")
assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), (
"supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be "
"True if backward ad is also checked, i.e., supports_forward_ad should be True.", self.name)
# Autograd flags that depend on both forward AD and backward AD
if self.supports_inplace_autograd is None:
self.supports_inplace_autograd = self.supports_autograd or self.supports_forward_ad
else:
assert not (self.supports_inplace_autograd and not self.supports_autograd and not self.supports_forward_ad), (
"supports_inplace_autograd refines the part of autograd that is supported, so "
"it should not be set if both supports_autograd and supports_forward_ad are False")
if self.aliases is not None:
self.aliases = tuple(AliasInfo(a) for a in self.aliases) # type: ignore[assignment]
else:
self.aliases = ()
def __call__(self, *args, **kwargs):
"""Calls the function variant of the operator."""
return self.op(*args, **kwargs)
def __str__(self):
return dataclass_repr(self)
def get_op(self):
"""Returns the function variant of the operator, torch.<op_name>."""
return self.op
def get_method(self):
"""Returns the method variant of the operator, torch.Tensor.<op_name>.
Returns None if the operator has no method variant.
"""
return self.method_variant
def get_inplace(self):
"""Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
Returns None if the operator has no inplace variant.
"""
return self.inplace_variant
def get_operator(self):
"""Returns operator variant of the operator, e.g. operator.neg
Returns None if the operator has no operator variant.
"""
return self.operator_variant
def get_inplace_operator(self):
"""Returns the inplace operator variant of the operator, e.g operator.iadd
Returns None if the operator has no inplace operator variant"""
return self.inplace_operator_variant
def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs but with the tensor input or first
tensor in a sequence input conjugated.
"""
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
conj_samples = list(samples)
def conjugate(tensor):
_requires_grad = tensor.requires_grad
tensor = tensor.conj()
return tensor.requires_grad_(_requires_grad)
        # Note: iterate over the materialized list, since `samples` may be a
        # one-shot generator that was already consumed by the list() call above
        for sample in conj_samples:
# Note: it is assumed that the input here is either a tensor or tensorlist
if isinstance(sample.input, torch.Tensor):
sample.input = conjugate(sample.input)
else:
sample.input[0] = conjugate(sample.input[0])
return tuple(conj_samples)
def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""
Returns an iterable of SampleInputs.
These samples should be sufficient to test the function works correctly
with autograd, TorchScript, etc.
"""
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
if kwargs.get('include_conjugated_inputs', False):
conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs)
samples_list = list(samples)
samples_list.extend(conj_samples)
samples = tuple(samples_list)
return samples
def reference_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""
Returns an iterable of SampleInputs.
Distinct from sample_inputs() above because this returns an expanded set
of inputs when reference_inputs_func is defined. If undefined this returns
the sample inputs.
"""
if self.reference_inputs_func is None:
return self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
if kwargs.get('include_conjugated_inputs', False):
raise NotImplementedError
return self.reference_inputs_func(self, device, dtype, requires_grad, **kwargs)
def error_inputs(self, device, **kwargs):
"""
Returns an iterable of ErrorInputs.
"""
return self.error_inputs_func(self, device, **kwargs)
def sample_inputs_sparse_coo(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
coo layout.
"""
return self.sample_inputs_sparse_coo_func(self, device, dtype, requires_grad, **kwargs)
def sample_inputs_sparse_csr(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
csr layout.
"""
return self.sample_inputs_sparse_csr_func(self, device, dtype, requires_grad, **kwargs)
def sample_inputs_sparse_csc(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
csc layout.
"""
return self.sample_inputs_sparse_csc_func(self, device, dtype, requires_grad, **kwargs)
def sample_inputs_sparse_bsr(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
bsr layout.
"""
return self.sample_inputs_sparse_bsr_func(self, device, dtype, requires_grad, **kwargs)
def sample_inputs_sparse_bsc(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
bsc layout.
"""
return self.sample_inputs_sparse_bsc_func(self, device, dtype, requires_grad, **kwargs)
def get_decorators(self, test_class, test_name, device, dtype):
'''Returns the decorators targeting the given test.'''
result = []
for decorator in self.decorators:
if isinstance(decorator, DecorateInfo):
if decorator.is_active(test_class, test_name, device, dtype):
result.extend(decorator.decorators)
else:
result.append(decorator)
return result
def supported_dtypes(self, device_type):
if device_type == 'cpu':
return self.dtypes
if device_type == 'cuda':
return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA
else:
return self.dtypes
def supported_backward_dtypes(self, device_type):
if not self.supports_autograd:
return set()
backward_dtypes = None
if device_type == 'cpu':
backward_dtypes = self.backward_dtypes
elif device_type == 'cuda':
backward_dtypes = self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA
else:
backward_dtypes = self.backward_dtypes
allowed_backward_dtypes = floating_and_complex_types_and(torch.bfloat16, torch.float16, torch.complex32)
return set(allowed_backward_dtypes).intersection(backward_dtypes)
def supports_dtype(self, dtype, device_type):
return dtype in self.supported_dtypes(device_type)
@property
def formatted_name(self):
"""Returns a formatted full name for this OpInfo that can be used in test names."""
variant = '_' + self.variant_test_name.replace('.', '_') if self.variant_test_name else ''
return '{}{}'.format(self.name.replace('.', '_'), variant)
# NOTE [Python References]
# Python References emulate existing PyTorch operations, but can ultimately
# be expressed in terms of "primitive" operations from torch._prims.
#
# These references are experimental.
# See https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-0/577
# for additional context.
#
# Python Reference OpInfos should be added to the python_ref_db list below.
# Tests can opt into running on these references by including
# that list in the Sequence they pass to the @ops decorator.
#
# When a Python Reference OpInfo is constructed, a pointer to an
# existing OpInfo must be provided using the torch_opinfo_name kwarg
# (and, if needed, the torch_opinfo_variant_name kwarg). The matching
# OpInfo is found and inherited from.
#
# Instead of just inheriting the existing OpInfo's metadata, the
# Python Reference OpInfos inherit the existing OpInfo's
# construction arguments. These arguments can be overridden
# by adding kwargs to the constructor.
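# For example (illustrative; "_refs.foo" is a made-up reference name), a
# Python Reference OpInfo entry might look like:
#
#   PythonRefInfo(
#       "_refs.foo",
#       torch_opinfo_name="foo",
#   )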
def _find_referenced_opinfo(referenced_name, variant_name):
'''
    Finds the OpInfo with the given name and variant name.
'''
from torch.testing._internal.common_methods_invocations import op_db
for opinfo in op_db:
if opinfo.name == referenced_name and opinfo.variant_test_name == variant_name:
return opinfo
def _inherit_constructor_args(name, op, inherited, overrides):
# inherits metadata
common_kwargs = {
'name': name,
'op': op,
'aliases': None, # TODO add a check for alias coverage
'method_variant': None,
'inplace_variant': None, # TODO: add a check for inplace coverage
'supports_scripting': False,
}
# Acquires inherited kwargs
kwargs = inherited.copy()
# Fixes metadata
if 'kwargs' in kwargs:
kwargs.update(kwargs['kwargs'])
del kwargs['kwargs']
if 'self' in kwargs:
del kwargs['self']
if '__class__' in kwargs:
del kwargs['__class__']
if 'skips' in kwargs:
del kwargs['skips']
if 'decorators' in kwargs:
del kwargs['decorators']
# Overrides metadata
kwargs.update(common_kwargs)
kwargs.update(overrides)
# At the moment no prims support autograd, so we must not run autograd
# tests e.g. when testing dtype support. Once we start writing autograd
# formulas for prims this can be removed.
kwargs['supports_autograd'] = False
kwargs['supports_gradgrad'] = False
kwargs['supports_fwgrad_bwgrad'] = False
kwargs['supports_inplace_autograd'] = False
kwargs['supports_forward_ad'] = False
return kwargs
class PythonRefInfo(OpInfo):
'''
An OpInfo for a Python reference of an OpInfo base class operation.
'''
def __init__(
self,
            name,  # the string name of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name='', # the variant name for corresponding torch opinfo
validate_view_consistency=True,
supports_nvfuser=True,
**kwargs): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name, torch_opinfo_variant_name)
self.validate_view_consistency = validate_view_consistency
self.supports_nvfuser = supports_nvfuser
assert isinstance(self.torch_opinfo, OpInfo)
inherited = self.torch_opinfo._original_opinfo_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super(PythonRefInfo, self).__init__(**ukwargs)
def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs):
"""Generates input tensors for testing reduction operators"""
yield make_tensor([], dtype=dtype, device=device, requires_grad=requires_grad)
yield make_tensor([2], dtype=dtype, device=device, requires_grad=requires_grad)
yield make_tensor([3, 5], dtype=dtype, device=device, requires_grad=requires_grad)
yield make_tensor([3, 2, 1, 2], dtype=dtype, device=device, requires_grad=requires_grad)
def _generate_reduction_kwargs(ndim, supports_multiple_dims=True):
"""Generates a subset of all valid dim and keepdim kwargs given ndim that
is appropriate for testing reduction operators.
"""
# Test default dim and keepdim
yield {}
    # Test reducing innermost and outermost dimensions
yield {'dim': 0, 'keepdim': True}
yield {'dim': -1, 'keepdim': False}
# Test reducing middle dimension
if ndim > 2:
yield {'dim': ndim // 2, 'keepdim': True}
if supports_multiple_dims:
# Test reducing all dimensions
yield {'dim': tuple(range(ndim)), 'keepdim': False}
# Test reducing both first and last dimensions
if ndim > 1:
yield {'dim': (0, -1), 'keepdim': True}
# Test reducing every other dimension starting with the second
if ndim > 3:
yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False}
def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for reduction operators."""
# TODO(@heitorschueroff) Once all reduction operators are using
# ReductionOpInfo use op_info.supports_multiple_dims directly.
supports_multiple_dims: bool = kwargs.get('supports_multiple_dims', True)
# TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
# use op_info.generate_args_kwargs directly.
generate_args_kwargs = kwargs.get('generate_args_kwargs', lambda *args, **kwargs: (yield tuple(), {}))
for t in _generate_reduction_inputs(device, dtype, requires_grad):
for reduction_kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):
kwargs.update(reduction_kwargs)
yield SampleInput(t.detach().requires_grad_(requires_grad), args=args, kwargs=kwargs)
# NOTE [Reductions]:
#
# For testing purposes, we relax the definition of a reduction operator
# as defined in the docstring below. We do this to capture operators with
# a similar API so they can be tested automatically. However...
#
# Strictly speaking a reduction operator is an operator that can reduce an
# array to a single scalar value and that can be computed from the partial
# result of reducing subarrays. This usually means that the reduction operation
# should be commutative and associative. This definition is important when it
# comes to implementation as it determines how a reduction can be parallelized.
#
# For example, many summary statistics such as median, mode and quantile cannot
# be computed from partial results because these are sorting and counting based
# algorithms that need information that would be lost in the reduced value.
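# For example (illustrative): sum([1, 2, 3, 4]) can be computed as
# sum([1, 2]) + sum([3, 4]), but median([1, 2, 3, 4]) cannot generally be
# recovered from median([1, 2]) and median([3, 4]).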
class ReductionOpInfo(OpInfo):
"""Reduction operator information.
An operator is a reduction operator if it reduces one or more dimensions of
the input tensor to a single value. Reduction operators must implement the
following signature:
- `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor`
ReductionOpInfo tests that reduction operators implement a consistent API.
Optional features such as reducing over multiple dimensions are captured in
the optional keyword parameters of the ReductionOpInfo constructor.
If a reduction operator does not yet implement the full required API of
reduction operators, this should be documented by xfailing the failing
tests rather than adding optional parameters to ReductionOpInfo.
NOTE
The API for reduction operators has not yet been finalized and some
requirements may change.
See tests in test/test_reductions.py
"""
def __init__(
self, name, *,
# The identity value for the operator if it has one.
identity: Optional[Any] = None,
# The nan policy for the operator if it implements one.
# - propagate: NaN values are propagated to the output
# - omit: NaN values are discarded during the reduction
nan_policy: Optional[str] = None,
# Whether the operator supports reducing multiple dimensions.
supports_multiple_dims: bool = True,
# Whether the operator promotes integral to floating point dtypes.
promotes_int_to_float: bool = False,
# Whether the operator promotes all integral dtypes to int64.
promotes_int_to_int64: bool = False,
# If a specific dtype is given, then the operator always returns that
# dtype irrespective of the input dtype. If None, the operator returns
# the dtype according to the type promotion rules above.
result_dtype: Optional[torch.dtype] = None,
# Casts complex results to real (e.g. linalg.norm or torch.var)
complex_to_real: bool = False,
# ReductionOpInfo tests generate their own input, dim and keepdim
# arguments and call this function to generate tuples of extra args and
# kwargs to use when calling the op. This is required for operators that
# have other required parameters besides the input tensor.
generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}),
# Options from the OpInfo base class
**kwargs,
):
self._original_reduction_args = locals().copy()
assert nan_policy in (None, 'propagate', 'omit')
# These are mutually exclusive options
assert not (result_dtype and promotes_int_to_float)
assert not (result_dtype and promotes_int_to_int64)
assert not (result_dtype and complex_to_real)
assert not (promotes_int_to_float and promotes_int_to_int64)
# Default sample_inputs_func for ReductionOpInfo which augments sample
# inputs from sample_inputs_reduction with the args and kwargs from
# generate_args_kwargs. This is only used if sample_inputs_func is None.
def sample_inputs_func(*args, **kwargs):
kwargs['supports_multiple_dims'] = supports_multiple_dims
kwargs['generate_args_kwargs'] = generate_args_kwargs
yield from sample_inputs_reduction(*args, **kwargs)
# Override OpInfo defaults and call base class __init__
kwargs.setdefault('inplace_variant', None)
kwargs.setdefault('sample_inputs_func', sample_inputs_func)
super().__init__(name, **kwargs)
self.identity = identity
self.nan_policy = nan_policy
self.supports_multiple_dims = supports_multiple_dims
self.promotes_int_to_float = promotes_int_to_float
self.promotes_int_to_int64 = promotes_int_to_int64
self.complex_to_real = complex_to_real
self.result_dtype = result_dtype
self.generate_args_kwargs = generate_args_kwargs
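# For illustration only (the arguments below are assumptions, not the real
# database entry), a reduction like torch.sum might be described as:
#
#   ReductionOpInfo(
#       'sum',
#       identity=0,
#       supports_multiple_dims=True,
#       promotes_int_to_int64=True,
#       dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
#   )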
# The base reference input generation for elementwise binary operations
def _reference_inputs_elementwise_binary(op, device, dtype, requires_grad, exclude_zero, **kwargs):
yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
yield from generate_elementwise_binary_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
if dtype is not torch.bool:
yield from generate_elementwise_binary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
if dtype not in (torch.bool, torch.uint8, torch.int8):
yield from generate_elementwise_binary_large_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
yield from generate_elementwise_binary_broadcasting_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
yield from generate_elementwise_binary_with_scalar_samples(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
yield from generate_elementwise_binary_with_scalar_and_type_promotion_samples(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
if dtype.is_floating_point or dtype.is_complex:
yield from generate_elementwise_binary_extremal_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
# Note that these reference inputs use scalars for the SampleInput.input value,
# and many tests require SampleInput.input to be a tensor or a list of tensors
def reference_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
if hasattr(op, "rhs_make_tensor_kwargs"):
exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
gen = partial(
_reference_inputs_elementwise_binary, op, device, dtype, requires_grad, exclude_zero, **kwargs
)
# yields "normal" samples
yield from gen()
# yields noncontiguous samples
for sample in gen():
yield sample.noncontiguous()
yield from generate_elementwise_binary_noncontiguous_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
yield from generate_elementwise_binary_arbitrarily_strided_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
# A function that extends an elementwise binary operator's bespoke error inputs
# with generic error inputs for the class of elementwise binary operations
def make_error_inputs_elementwise_binary(error_inputs_func):
def error_inputs_func_wrapper(op, device, **kwargs):
if error_inputs_func is not None:
yield from error_inputs_func(op, device, **kwargs)
if not op.supports_rhs_python_scalar:
si = SampleInput(torch.tensor((1, 2, 3), device=device), args=(2,))
yield ErrorInput(si, error_type=Exception, error_regex="")
if not op.supports_one_python_scalar:
si = SampleInput(2, args=(torch.tensor((1, 2, 3), device=device),))
yield ErrorInput(si, error_type=Exception, error_regex="")
if (
not kwargs.get("skip_two_python_scalars", False)
and not op.supports_two_python_scalars
):
si = SampleInput(2, args=(3,))
yield ErrorInput(si, error_type=Exception, error_regex="")
return error_inputs_func_wrapper
# The following functions and classes are for testing elementwise binary operators.
# Returns a generator of pairs of contiguous tensors on the requested device
# and with the requested dtype.
#
# This function is intended to test the non-vectorized and vectorized code
# paths of elementwise binary functions, as well as their handling of odd tensor
# sizes (like zero-dim tensors and tensors with zero elements).
#
# Each iterable will include a tensor with no elements,
# zero dim (scalar) tensors, small 1D tensors, a medium 1D tensor, and
# a large 2D tensor.
def generate_elementwise_binary_tensors(op, *, device, dtype, requires_grad=False, exclude_zero=False):
shapes = (
# tensors with no elements
(0,),
(1, 0, 3),
# zero dim (scalar) tensor
(),
# small 1D tensor
(20,),
# medium 1D tensor
(812,),
# large 2D tensor
(1029, 917),
)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
yield SampleInput(lhs, args=(rhs,))
def generate_elementwise_binary_arbitrarily_strided_tensors(op, *, device, dtype, requires_grad=False, exclude_zero=False):
# shape, strides, offset
strided_cases = (
((5, 6, 2), (1, 1, 7), 2),
((5, 5, 4), (1, 1, 7), 2),
((5, 5, 2), (4, 5, 7), 3),
((5, 5, 2), (5, 5, 7), 3),
((5, 5, 2), (5, 5, 5), 3),
((9, 5, 2), (0, 1, 7), 3),
)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
for shape, strides, offset in strided_cases:
a = make_arg(500,).as_strided(shape, strides, offset)
b = make_arg(shape)
yield SampleInput(a, args=(b,))
# Returns a generator of pairs of contiguous tensors on the requested device and with
# the requested dtype.
#
# Unlike the previous function, the values in these tensors are specified manually.
def generate_elementwise_binary_small_value_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=None
):
if exclude_zero is None:
if hasattr(op, "rhs_make_tensor_kwargs"):
exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
# defines interesting values
_unsigned_int_vals = (0, 1, 55, 127, 128, 190, 210, 220, 254)
_int_vals = (0, -1, 1, -55, 55, -127, 127, -128)
_float_vals = (
0.0,
-0.0,
-0.001,
0.001,
-0.25,
0.25,
-1.0,
1.0,
-math.pi / 2,
math.pi / 2,
-math.pi + 0.00001,
math.pi - 0.00001,
-math.pi,
math.pi,
-math.pi - 0.00001,
math.pi + 0.00001,
)
l_vals = []
r_vals = []
if dtype.is_floating_point:
prod = product(_float_vals, _float_vals)
elif dtype.is_complex:
complex_vals = product(_float_vals, _float_vals)
# Note the use of list is required here or the map generator will be
# emptied by the following product and it won't produce the desired cross-product
complex_vals = list(map(lambda x: complex(*x), complex_vals))
prod = product(complex_vals, complex_vals)
elif dtype in (torch.int8, torch.int16, torch.int32, torch.int64):
prod = product(_int_vals, _int_vals)
elif dtype is torch.uint8:
prod = product(_unsigned_int_vals, _unsigned_int_vals)
else:
raise ValueError("Unsupported dtype!")
for l, r in prod:
l_vals.append(l)
if r == 0 and exclude_zero:
r_vals.append(1)
else:
r_vals.append(r)
lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad)
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(lhs, args=(rhs,))
def generate_elementwise_binary_large_value_tensors(
op, *, device, dtype, requires_grad=False
):
_large_int_vals = (-1113, 1113, -10701, 10701)
_large_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7)
_large_float_vals = _large_float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20)
l_vals = []
r_vals = []
if dtype == torch.float16:
prod = product(_large_float16_vals, _large_float16_vals)
elif dtype.is_floating_point:
prod = product(_large_float_vals, _large_float_vals)
elif dtype.is_complex:
complex_vals = product(_large_float_vals, _large_float_vals)
# Note the use of list is required here or the map generator will be
# emptied by the following product and it won't produce the desired cross-product
complex_vals = list(map(lambda x: complex(*x), complex_vals))
prod = product(complex_vals, complex_vals)
elif dtype in (torch.int16, torch.int32, torch.int64):
prod = product(_large_int_vals, _large_int_vals)
else:
raise ValueError("Unsupported dtype!")
for l, r in prod:
l_vals.append(l)
r_vals.append(r)
lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad)
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(lhs, args=(rhs,))
def generate_elementwise_binary_extremal_value_tensors(
op, *, device, dtype, requires_grad=False
):
_float_extremals = (float("inf"), float("-inf"), float("nan"))
l_vals = []
r_vals = []
if dtype.is_floating_point:
prod = product(_float_extremals, _float_extremals)
elif dtype.is_complex:
complex_vals = product(_float_extremals, _float_extremals)
# Note the use of list is required here or the map generator will be
# emptied by the following product and it won't produce the desired cross-product
complex_vals = list(map(lambda x: complex(*x), complex_vals))
prod = product(complex_vals, complex_vals)
else:
raise ValueError("Unsupported dtype!")
for l, r in prod:
l_vals.append(l)
r_vals.append(r)
lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad)
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(lhs, args=(rhs,))
# Test case for NaN propagation
nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan'))
lhs = make_tensor((128, 128), device=device, dtype=dtype, requires_grad=requires_grad)
lhs.flatten()[::3] = nan
rhs = make_tensor((128, 128), device=device, dtype=dtype, requires_grad=requires_grad)
rhs.flatten()[::3] = nan
yield SampleInput(lhs, args=(rhs,))
# Returns a generator of pairs of contiguous and noncontiguous tensors that
# require broadcasting
def generate_elementwise_binary_broadcasting_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=False
):
shapes = (
((1,), ()),
((2,), ()),
((1,), (2,)),
((2, 1), (2,)),
((1, 2), (2,)),
((3, 2), (2,)),
((1, 3, 2), (2,)),
((1, 3, 2), (3, 2)),
((3, 1, 2), (3, 2)),
((2, 3, 2), ()),
((3, 1, 2), (1, 3, 2)),
)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
for shape, noncontiguous in product(shapes, [True, False]):
shape_lhs, shape_rhs = shape
lhs = make_arg(
shape_lhs, noncontiguous=noncontiguous, **op.lhs_make_tensor_kwargs
)
rhs = make_arg(
shape_rhs, noncontiguous=noncontiguous, **op.rhs_make_tensor_kwargs
)
yield SampleInput(lhs, args=(rhs,), broadcasts_input=True)
# Returns a generator of pairs of contiguous tensors and scalars
def generate_elementwise_binary_with_scalar_samples(
op, *, device, dtype, requires_grad=False
):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
shapes = ((), (3,), (5, 3), (0, 1, 3), (1, 5))
if op.supports_rhs_python_scalar:
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item()
rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item()
yield SampleInput(lhs, args=(rhs_scalar,))
# Extends with scalar lhs
if op.supports_one_python_scalar:
yield SampleInput(lhs_scalar, args=(rhs,))
if op.supports_two_python_scalars:
lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item()
rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item()
yield SampleInput(lhs_scalar, args=(rhs_scalar,))
# Returns a generator of pairs of contiguous tensors, 0d tensors, and scalars that exercise type promotion
def generate_elementwise_binary_with_scalar_and_type_promotion_samples(
op, *, device, dtype, requires_grad=False
):
    # add these samples only for logical and comparison ops; arithmetic ops are not happy about extremal scalars
if op.name in ('eq', 'ne', 'gt', 'ge', 'lt', 'le', 'logical_and', 'logical_or', 'logical_xor'):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
shape = (23,) # this shape is big enough to trigger vectorization, and has non-vectorized tail
values = (float('nan'), float('inf'), -float('inf'))
scalar_tensors = tuple(torch.tensor(val) for val in values)
if op.supports_rhs_python_scalar:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
for scalar in values + scalar_tensors:
yield SampleInput(lhs, args=(scalar,))
# Extends with scalar lhs
if op.supports_one_python_scalar:
yield SampleInput(scalar, args=(rhs,))
# Returns a generator of pairs of noncontiguous tensors
def generate_elementwise_binary_noncontiguous_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=False
):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
# Generic noncontiguity
lhs = make_arg((1026,), noncontiguous=True, **op.lhs_make_tensor_kwargs)
rhs = make_arg((1026,), noncontiguous=True, **op.rhs_make_tensor_kwargs)
yield SampleInput(lhs.clone(), args=(rhs.clone(),))
yield SampleInput(lhs.contiguous(), args=(rhs,))
# Transposed
lhs = make_arg((789, 357), **op.lhs_make_tensor_kwargs)
rhs = make_arg((789, 357), **op.rhs_make_tensor_kwargs)
yield SampleInput(lhs.T, args=(rhs.T,))
# More noncontiguity
shapes = ((5, 7), (1024,))
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
lhs_non_contig.copy_(lhs)
rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
rhs_non_contig.copy_(rhs)
yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),))
yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,))
# Noncontiguous indices
shape = (2, 2, 1, 2)
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_non_contig = lhs[:, 1, ...]
rhs_non_contig = rhs[:, 1, ...]
yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),))
yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,))
# Expanded tensors
shapes = ((1, 3), (1, 7), (5, 7))
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_non_contig = lhs.expand(3, -1, -1)
rhs_non_contig = rhs.expand(3, -1, -1)
yield SampleInput(lhs_non_contig, args=(rhs_non_contig,))
# Sample inputs for elementwise binary operators, like add
def sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
_M = S if kwargs.get("small_inputs_only", False) else M
_S = XS if kwargs.get("small_inputs_only", False) else S
if hasattr(op, "rhs_make_tensor_kwargs"):
exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
shapes = (
((), ()),
((_S,), ()),
((_S, 1), (_S,)),
((_M, _S), ()),
((_S, _M, _S), (_M, _S)),
((_S, _M, _S), (_S, _M, _S)),
((_M, 1, _S), (_M, _S)),
((_M, 1, _S), (1, _M, _S)),
((0, 1, XS), (0, _M, XS)),
)
sample_kwargs = kwargs.get("sample_kwargs", {})
for shape_lhs, shape_rhs in shapes:
lhs = make_arg(shape_lhs, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape_rhs, **op.rhs_make_tensor_kwargs)
broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)
yield SampleInput(
lhs, args=(rhs,), kwargs=sample_kwargs, broadcasts_input=broadcasts_input
)
# Metadata class for binary "universal functions (ufuncs)" that accept two
# tensors and have common properties
class BinaryUfuncInfo(OpInfo):
"""Operator information for 'universal binary functions (binary ufuncs).'
These are functions of two tensors with common properties like:
- they are elementwise functions
- the output shape is determined by the input shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/stable/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(
self,
name,
*,
sample_inputs_func=sample_inputs_elementwise_binary,
reference_inputs_func=reference_inputs_elementwise_binary,
error_inputs_func=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
promotes_int_to_float=False, # Set to true if the op promotes integer inputs to float
always_returns_bool=False, # Set to true if the op always returns bool tensors
supports_rhs_python_scalar=True, # Whether the operator allows Tensor x scalar inputs
supports_one_python_scalar=False, # Whether the operator allows scalar x tensor and tensor x scalar inputs
supports_two_python_scalars=False, # Whether the operator allows scalar x scalar inputs
**kwargs,
):
self._original_binary_ufunc_args = locals().copy()
# Elementwise binary operations perform the equivalent of test_numpy_refs
# in test_binary_ufuncs, but with additional test granularity. So the
# generic test_ops.py test is skipped because it's redundant.
common_skips = (
DecorateInfo(
unittest.skip("Skipping redundant test."),
"TestCommon",
"test_numpy_refs",
),
)
kwargs["skips"] = kwargs.get("skips", tuple()) + common_skips
super(BinaryUfuncInfo, self).__init__(
name,
sample_inputs_func=sample_inputs_func,
reference_inputs_func=reference_inputs_func,
error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func),
**kwargs,
)
# [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on.
if lhs_make_tensor_kwargs is None:
lhs_make_tensor_kwargs = {}
self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs
if rhs_make_tensor_kwargs is None:
rhs_make_tensor_kwargs = {}
self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs
self.promotes_int_to_float = promotes_int_to_float
self.always_returns_bool = always_returns_bool
self.supports_rhs_python_scalar = supports_rhs_python_scalar
self.supports_one_python_scalar = supports_one_python_scalar
self.supports_two_python_scalars = supports_two_python_scalars
if self.supports_two_python_scalars:
self.supports_one_python_scalar = True
if self.supports_one_python_scalar:
assert (
supports_rhs_python_scalar
), "Can't support lhs and rhs Python scalars but not rhs scalars!"
# The following functions and classes are for testing elementwise unary operators.
def sample_inputs_elementwise_unary(
op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
if not op_kwargs:
op_kwargs = {}
_L = S if kwargs.get("small_inputs_only", False) else L
low, high = op_info.domain
low = low if low is None else low + op_info._domain_eps
high = high if high is None else high - op_info._domain_eps
if op_info.supports_sparse_csr or op_info.supports_sparse_csc or op_info.supports_sparse_bsr or op_info.supports_sparse_bsc:
# Tensors with dim=2 for sparse compressed testing
yield SampleInput(
make_tensor(
(_L, _L),
device=device,
dtype=dtype,
low=low,
high=high,
requires_grad=requires_grad,
),
kwargs=op_kwargs,
)
else:
# Creates 1D, empty, and scalar tensors
for shape in ((_L,), (1, 0, 3), ()):
yield SampleInput(
make_tensor(
shape,
device=device,
dtype=dtype,
low=low,
high=high,
requires_grad=requires_grad,
),
kwargs=op_kwargs,
)
# Replace values satisfying condition with a safe value. This is used to block
# out values that could cause singularities, like tan(pi/2)
def _replace_values_in_tensor(tensor, condition, safe_value):
mask = condition(tensor)
tensor.masked_fill_(mask, safe_value)
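# --- Illustrative sketch (not part of the original PyTorch sources) ---
# A small, hypothetical example of the filtering pattern above: values that land
# too close to tan's singularity at pi/2 are replaced in place with a safe value
# before the tensor is handed to the operator under test. The tolerance below is
# an arbitrary choice for illustration.
def _example_filter_tan_singularity():  # hypothetical helper, never called by the test suite
    t = torch.linspace(0, math.pi, steps=17)  # includes a point at (approximately) pi/2
    _replace_values_in_tensor(
        t, condition=lambda x: (x - math.pi / 2).abs() < 1e-3, safe_value=0.0
    )
    return torch.tan(t)  # no extremal values from the singularity remain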
# Helper to create a unary elementwise tensor with valid inputs
def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs):
low, high = op.domain
low = low if low is None else low + op._domain_eps
high = high if high is None else high - op._domain_eps
a = make_tensor(shape, low=low, high=high, dtype=dtype, **kwargs)
if op.reference_numerics_filter is not None and dtype is not torch.bool:
condition, safe_value = op.reference_numerics_filter
_replace_values_in_tensor(a, condition, safe_value)
return a
# Restricts the values in the tensor to the domain of the
# given elementwise unary operator
def _filter_unary_elementwise_tensor(a, *, op):
# short-circuits for boolean tensors
if a.dtype is torch.bool:
return a
low, high = op.domain
low = low if low is None else low + op._domain_eps
high = high if high is None else high - op._domain_eps
if a.dtype is torch.uint8 and low is not None:
low = max(low, 0)
if not a.dtype.is_floating_point and not a.dtype.is_complex:
low = math.ceil(low) if low is not None else None
high = math.floor(high) if high is not None else None
if op.reference_numerics_filter is not None:
condition, safe_value = op.reference_numerics_filter
_replace_values_in_tensor(a, condition, safe_value)
if low is not None or high is not None:
if a.dtype.is_complex:
a.real.clamp_(low, high)
a.imag.clamp_(low, high)
else:
a.clamp_(min=low, max=high)
return a
def generate_elementwise_unary_tensors(op, *, device, dtype, requires_grad, **kwargs):
# Special-cases bool
if dtype is torch.bool:
tensors = (
torch.empty(0, device=device, dtype=torch.bool),
torch.tensor(True, device=device),
torch.tensor(False, device=device),
torch.tensor((True, False), device=device),
make_tensor((812,), device=device, dtype=dtype),
make_tensor((1029, 917), device=device, dtype=dtype),
)
for a in tensors:
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
shapes = (
(1029, 917),
(812,),
# Empty sizes
(0,),
(0, 3, 3),
(1, 0, 5),
(6, 0, 0, 0),
(3, 0, 1, 0),
)
make_arg = partial(
_make_unary_elementwise_tensor,
op=op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
for shape in shapes:
a = make_arg(shape)
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
def generate_elementwise_unary_small_value_tensors(
op, *, device, dtype, requires_grad=False
):
for sample in generate_elementwise_binary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
):
a = _filter_unary_elementwise_tensor(sample.input, op=op)
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
def generate_elementwise_unary_large_value_tensors(
op, *, device, dtype, requires_grad=False
):
for sample in generate_elementwise_binary_large_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
):
a = _filter_unary_elementwise_tensor(sample.input, op=op)
yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0])
def generate_elementwise_unary_extremal_value_tensors(
op, *, device, dtype, requires_grad=False
):
for sample in generate_elementwise_binary_extremal_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
):
yield SampleInput(
sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0]
)
def generate_elementwise_unary_noncontiguous_tensors(
op, *, device, dtype, requires_grad=False
):
low, high = op.domain
low = low if low is None else low + op._domain_eps
high = high if high is None else high - op._domain_eps
make_arg = partial(
_make_unary_elementwise_tensor,
op=op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
# Generic noncontiguity
t = make_arg((1026,), noncontiguous=True)
yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0])
# Transposed
t = make_arg((1024, 1024)).T
yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0])
# Expanded tensors
shapes = ((1, 3), (1, 7), (5, 7))
for shape in shapes:
t = make_arg(shape)
t_non_contig = t.expand(3, -1, -1)
yield SampleInput(
t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0]
)
def generate_elementwise_unary_arbitrarily_strided_tensors(op, *, device, dtype, requires_grad=False):
# shape, strides, offset
strided_cases = (
((5, 6, 2), (1, 1, 7), 2),
((5, 5, 4), (1, 1, 7), 2),
((5, 5, 2), (4, 5, 7), 3),
((5, 5, 2), (5, 5, 7), 3),
((5, 5, 2), (5, 5, 5), 3),
((9, 5, 2), (0, 1, 7), 3),
)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
for shape, strides, offset in strided_cases:
a = make_arg(500,).as_strided(shape, strides, offset)
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
# Reuses the elementwise binary generators for consistency
# TODO: in the future generalize the reference generators to handle n-ary elementwise operations
def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs):
yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
yield from generate_elementwise_unary_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
if dtype is not torch.bool:
yield from generate_elementwise_unary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
if dtype not in (torch.bool, torch.uint8, torch.int8) and (
op.handles_large_floats
or (not dtype.is_floating_point and not dtype.is_complex)
):
yield from generate_elementwise_unary_large_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
if dtype.is_floating_point or (op.handles_complex_extremal_values and dtype.is_complex):
yield from generate_elementwise_unary_extremal_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs):
gen = partial(
_reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs
)
# yields "normal" samples
yield from gen()
# yields noncontiguous samples
for sample in gen():
yield sample.noncontiguous()
yield from generate_elementwise_unary_noncontiguous_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
yield from generate_elementwise_unary_arbitrarily_strided_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
# Metadata class for unary "universal functions (ufuncs)" that accept a single
# tensor and have common properties
class UnaryUfuncInfo(OpInfo):
"""Operator information for 'universal unary functions (unary ufuncs).'
These are functions of a single tensor with common properties like:
- they are elementwise functions
- the input shape is the output shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(
self,
name, # the string name of the function
*,
dtypes=floating_types(),
domain=(None, None), # the [low, high) domain of the function
handles_complex_extremal_values=True, # whether the op correctly handles extremal values (like nan/inf)
handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)
supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle
sample_inputs_func=sample_inputs_elementwise_unary,
reference_inputs_func=reference_inputs_elementwise_unary,
sample_kwargs=lambda device, dtype, input: ({}, {}),
reference_numerics_filter=None, # Filters values in the range of the domain specified above but that should not be tested
**kwargs,
):
self._original_unary_ufunc_args = locals().copy()
super().__init__(
name,
dtypes=dtypes,
sample_inputs_func=sample_inputs_func,
reference_inputs_func=reference_inputs_func,
**kwargs,
)
self.domain = domain
self.handles_complex_extremal_values = handles_complex_extremal_values
self.handles_large_floats = handles_large_floats
self.supports_complex_to_float = supports_complex_to_float
self.reference_numerics_filter = reference_numerics_filter
# test_unary_ufuncs.py generates its own inputs to test the consistency
# of the operator on sliced tensors, non-contig tensors, etc.
# `sample_kwargs` is a utility function to provide kwargs
# along with those inputs if required (eg. clamp).
# It should return two dictionaries: the first holding kwargs for the
# torch operator and the second for the reference NumPy operator.
self.sample_kwargs = sample_kwargs
# Epsilon to ensure grad and gradgrad checks don't test values
# outside a function's domain.
self._domain_eps = 1e-5
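# --- Illustrative sketch (not part of the original PyTorch sources) ---
# A hypothetical `sample_kwargs` callable for a clamp-like unary op, following the
# contract described above: it returns one kwargs dict for the torch operator and
# one for the NumPy reference. The bounds are arbitrary and for illustration only.
def _example_clamp_sample_kwargs(device, dtype, input):  # hypothetical, never used by the test suite
    torch_kwargs = {"min": -1, "max": 1}
    numpy_kwargs = {"a_min": -1, "a_max": 1}  # np.clip uses different argument names
    return torch_kwargs, numpy_kwargs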
def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):
is_fp16_or_chalf = dtype == torch.complex32 or dtype == torch.half
if not is_fp16_or_chalf:
nd_tensor = partial(make_tensor, (S, S + 1, S + 2), device=device,
dtype=dtype, requires_grad=requires_grad)
oned_tensor = partial(make_tensor, (31,), device=device,
dtype=dtype, requires_grad=requires_grad)
else:
# cuFFT supports powers of 2 for half and complex half precision
# NOTE: For hfft, hfft2, hfftn, irfft, irfft2, irfftn with default args
# where output_size n=2*(input_size - 1), we make sure that logical fft size is a power of two
low = None
high = None
if self.name in ['fft.hfft', 'fft.irfft',
'_refs.fft.hfft', '_refs.fft.irfft']:
shapes = ((2, 9, 9), (33,))
elif self.name in ['fft.hfft2', 'fft.irfft2',
'_refs.fft.hfft2', '_refs.fft.irfft2']:
shapes = ((2, 8, 9), (33,))
elif self.name in ['fft.hfftn', 'fft.irfftn',
'_refs.fft.hfftn', '_refs.fft.irfftn']:
shapes = ((2, 2, 33), (33,))
# Adjusting the limits because the test would be flaky due to over-saturation of float16
# See: https://github.com/pytorch/pytorch/pull/81416
low = -1.0
high = 1.0
else:
shapes = ((2, 8, 16), (32,))
nd_tensor = partial(make_tensor, shapes[0], device=device, low=low, high=high,
dtype=dtype, requires_grad=requires_grad)
oned_tensor = partial(make_tensor, shapes[1], device=device, low=low, high=high,
dtype=dtype, requires_grad=requires_grad)
if self.ndimensional == SpectralFuncType.ND:
return [
SampleInput(nd_tensor(),
kwargs=dict(s=(3, 10) if not is_fp16_or_chalf else (4, 8), dim=(1, 2), norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(s=(8,))),
SampleInput(oned_tensor()),
*(SampleInput(nd_tensor(),
kwargs=dict(dim=dim))
for dim in [-1, -2, -3, (0, -1)]),
]
elif self.ndimensional == SpectralFuncType.TwoD:
return [
SampleInput(nd_tensor(),
kwargs=dict(s=(3, 10) if not is_fp16_or_chalf else (4, 8), dim=(1, 2), norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(s=(6, 8) if not is_fp16_or_chalf else (4, 8))),
SampleInput(nd_tensor(),
kwargs=dict(dim=0)),
SampleInput(nd_tensor(),
kwargs=dict(dim=(0, -1))),
SampleInput(nd_tensor(),
kwargs=dict(dim=(-3, -2, -1))),
]
else:
return [
SampleInput(nd_tensor(),
kwargs=dict(n=10 if not is_fp16_or_chalf else 8, dim=1, norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(n=7 if not is_fp16_or_chalf else 8)
),
SampleInput(oned_tensor()),
*(SampleInput(nd_tensor(),
kwargs=dict(dim=dim))
for dim in [-1, -2, -3]),
]
SpectralFuncType = Enum('SpectralFuncType', ('OneD', 'TwoD', 'ND'))
# Metadata class for Fast Fourier Transforms in torch.fft.
class SpectralFuncInfo(OpInfo):
"""Operator information for torch.fft transforms. """
def __init__(self,
name, # the string name of the function
*,
ref=None, # Reference implementation (probably in np.fft namespace)
dtypes=floating_and_complex_types(),
ndimensional: SpectralFuncType,
sample_inputs_func=sample_inputs_spectral_ops,
decorators=None,
**kwargs):
self._original_spectral_func_args = dict(locals()).copy()
self._original_spectral_func_args.update(kwargs)
decorators = list(decorators) if decorators is not None else []
decorators += [
skipCPUIfNoFFT,
DecorateInfo(toleranceOverride({torch.chalf: tol(4e-2, 4e-2)}),
"TestCommon", "test_complex_half_reference_testing")
]
super().__init__(name=name,
dtypes=dtypes,
decorators=decorators,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref
self.ndimensional = ndimensional
class ShapeFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for Shape manipulating operations like tile and roll"""
def __init__(self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCUDA=None,
dtypesIfROCM=None,
sample_inputs_func=None,
**kwargs):
super(ShapeFuncInfo, self).__init__(name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref
def sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False, low=None, high=None):
if same_size:
return [make_tensor((N, N), dtype=dtype, device=device, noncontiguous=noncontiguous) for _ in range(N)]
else:
return [make_tensor((N - i, N - i), dtype=dtype, device=device, noncontiguous=noncontiguous) for i in range(N)]
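# --- Illustrative sketch (not part of the original PyTorch sources) ---
# A hypothetical call showing what the sampler above produces; `self` is unused
# by the helper, so None is passed purely for illustration.
def _example_foreach_samples():  # hypothetical helper, never called by the test suite
    tensors = sample_inputs_foreach(None, "cpu", torch.float32, 3)
    # shapes: (3, 3), (2, 2), (1, 1); same_size=True would give three (3, 3) tensors
    return [t.shape for t in tensors]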
def get_foreach_method_names(name):
# Get the foreach op, its inplace variant, and the corresponding torch/Tensor reference functions
op_name = "_foreach_" + name
inplace_op_name = op_name + "_"
op = getattr(torch, op_name, None)
inplace_op = getattr(torch, inplace_op_name, None)
ref = getattr(torch, name, None)
ref_inplace = getattr(torch.Tensor, name + "_", None)
return op, inplace_op, ref, ref_inplace
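# --- Illustrative sketch (not part of the original PyTorch sources) ---
# A hypothetical lookup showing what the helper above resolves for "add" in a
# standard build; any of the four entries is None when the attribute is missing.
def _example_foreach_lookup():  # hypothetical helper, never called by the test suite
    op, inplace_op, ref, ref_inplace = get_foreach_method_names("add")
    # Typically: torch._foreach_add, torch._foreach_add_, torch.add, torch.Tensor.add_
    return op, inplace_op, ref, ref_inplace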
class ForeachFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for foreach functions"""
def __init__(self,
name,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
dtypesIfROCM=None,
supports_alpha_param=False,
sample_inputs_func=sample_inputs_foreach,
**kwargs):
super().__init__(
"_foreach_" + name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
sample_inputs_func=sample_inputs_func,
**kwargs
)
foreach_method, foreach_method_inplace, torch_ref_method, torch_ref_inplace = get_foreach_method_names(name)
self.method_variant = foreach_method
self.inplace_variant = foreach_method_inplace
self.ref = torch_ref_method
self.ref_inplace = torch_ref_inplace
self.supports_alpha_param = supports_alpha_param
if name == "norm":
self.ref = torch.linalg.vector_norm
| pytorch-master | torch/testing/_internal/opinfo/core.py |
import collections
import warnings
from functools import partial
import torch
from torch.testing._internal.common_cuda import (TEST_CUDA)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and,
all_types_and_complex,
all_types_and_half,
all_types,
complex_types,
floating_and_complex_types,
floating_types_and_half,
floating_types,
integral_types,
floating_types_and,
floating_and_complex_types_and,
integral_types_and,
all_types_and,
_dispatch_dtypes,
)
COMPLETE_DTYPES_DISPATCH = (
all_types,
all_types_and_complex,
all_types_and_half,
floating_types,
floating_and_complex_types,
floating_types_and_half,
integral_types,
complex_types,
)
EXTENSIBLE_DTYPE_DISPATCH = (
all_types_and_complex_and,
floating_types_and,
floating_and_complex_types_and,
integral_types_and,
all_types_and,
)
# Better way to acquire devices?
DEVICES = ['cpu'] + (['cuda'] if TEST_CUDA else [])
class _dynamic_dispatch_dtypes(_dispatch_dtypes):
# Class to tag the dynamically generated types.
pass
def get_supported_dtypes(op, sample_inputs_fn, device_type):
# Returns the supported dtypes for the given operator and device_type pair.
assert device_type in ['cpu', 'cuda']
if not TEST_CUDA and device_type == 'cuda':
warnings.warn("WARNING: CUDA is not available, empty_dtypes dispatch will be returned!")
return _dynamic_dispatch_dtypes(())
supported_dtypes = set()
for dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half):
try:
samples = sample_inputs_fn(op, device_type, dtype, False)
except RuntimeError:
# If `sample_inputs_fn` doesn't support sampling for a given
# `dtype`, we assume that the `dtype` is not supported.
# We raise a warning so that the user knows this was the case
# and can investigate whether there was an issue with the `sample_inputs_fn`.
warnings.warn(f"WARNING: Unable to generate sample for device:{device_type} and dtype:{dtype}")
continue
# We assume the dtype is supported
# only if all samples pass for the given dtype.
supported = True
for sample in samples:
try:
op(sample.input, *sample.args, **sample.kwargs)
except RuntimeError as re:
# dtype is not supported
supported = False
break
if supported:
supported_dtypes.add(dtype)
return _dynamic_dispatch_dtypes(supported_dtypes)
def dtypes_dispatch_hint(dtypes):
# Function returns the appropriate dispatch function (from COMPLETE_DTYPES_DISPATCH and EXTENSIBLE_DTYPE_DISPATCH)
# and its string representation for the passed `dtypes`.
return_type = collections.namedtuple('return_type', 'dispatch_fn dispatch_fn_str')
# CUDA is not available, dtypes will be empty.
if len(dtypes) == 0:
return return_type((), str(tuple()))
set_dtypes = set(dtypes)
for dispatch in COMPLETE_DTYPES_DISPATCH:
# Short circuit if we get an exact match.
if set(dispatch()) == set_dtypes:
return return_type(dispatch, dispatch.__name__ + "()")
chosen_dispatch = None
chosen_dispatch_score = 0.
for dispatch in EXTENSIBLE_DTYPE_DISPATCH:
dispatch_dtypes = set(dispatch())
if not dispatch_dtypes.issubset(set_dtypes):
continue
score = len(dispatch_dtypes)
if score > chosen_dispatch_score:
chosen_dispatch_score = score
chosen_dispatch = dispatch
# If the passed dtypes don't cover even the smallest extensible dispatch set
# (unlikely, but possible), fall back to returning them verbatim.
if chosen_dispatch is None:
return return_type((), str(dtypes))
return return_type(partial(chosen_dispatch, *tuple(set(dtypes) - set(chosen_dispatch()))),
                   chosen_dispatch.__name__ + str(tuple(set(dtypes) - set(chosen_dispatch()))))
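# --- Illustrative sketch (not part of the original PyTorch sources) ---
# A hypothetical illustration of the hint above: an exact match against one of the
# complete dispatchers is returned verbatim, while a set with extra dtypes is
# expressed as the largest matching extensible dispatcher plus the leftovers.
def _example_dispatch_hints():  # hypothetical helper, never called by the test suite
    exact = dtypes_dispatch_hint(floating_types())
    # exact.dispatch_fn_str == "floating_types()"
    extended = dtypes_dispatch_hint(list(floating_types()) + [torch.bool])
    # extended.dispatch_fn_str looks like "floating_types_and((torch.bool,))"
    return exact, extended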
def is_dynamic_dtype_set(op):
# Detect if the OpInfo entry acquired dtypes dynamically
# using `get_supported_dtypes`.
return op.dynamic_dtypes
def str_format_dynamic_dtype(op):
fmt_str = """
OpInfo({name},
dtypes={dtypes},
dtypesIfCUDA={dtypesIfCUDA},
)
""".format(name=op.name,
dtypes=dtypes_dispatch_hint(op.dtypes).dispatch_fn_str,
dtypesIfCUDA=dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str)
return fmt_str
| pytorch-master | torch/testing/_internal/opinfo/utils.py |
pytorch-master | torch/testing/_internal/test_module/__init__.py |
|
from __future__ import division
def div_int_future():
return 1 / 2
def div_float_future():
return 3.14 / 0.125
| pytorch-master | torch/testing/_internal/test_module/future_div.py |
import torch # noqa: F401
def div_int_nofuture():
return 1 / 2
def div_float_nofuture():
return 3.14 / 0.125
| pytorch-master | torch/testing/_internal/test_module/no_future_div.py |
#!/usr/bin/env python3
import contextlib
import enum
import logging
import os
import threading
from typing import NamedTuple
import torch
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.nn as nn
from torch.distributed import rpc
from torch.distributed.nn import RemoteModule
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
requires_gloo,
requires_nccl,
skip_if_lt_x_gpu,
skip_if_rocm,
)
from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
NUM_EM_ROW = 2
D_SPARSE = 3
D_DENSE = 2
D_HID = 3
D_OUT = 1
NUM_TRAINERS = 4
# Trainers + the master + the remote worker
WORLD_SIZE = NUM_TRAINERS + 2
TRAINER_RANKS = list(range(NUM_TRAINERS))
REMOTE_WORKER_RANK = TRAINER_RANKS[-1] + 1
MASTER_RANK = REMOTE_WORKER_RANK + 1
class DdpMode(enum.Enum):
# Don't apply DDP
NONE = enum.auto()
# Apply DDP to the top level nn.Module
OUTSIDE = enum.auto()
# Embed DDP inside the top level nn.Module
INSIDE = enum.auto()
def init_logger():
logger = logging.getLogger(__name__)
level = logging.DEBUG if "debug" in os.environ else logging.INFO
logger.setLevel(level)
console = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s"
)
console.setFormatter(formatter)
console.setLevel(level)
# add the handlers to the logger
logger.addHandler(console)
logger.propagate = False
return logger
gLogger = init_logger()
class FeatureSet(NamedTuple):
""" A feature set has 2 types of features"""
dense_features: torch.Tensor
sparse_features: torch.LongTensor
values: torch.Tensor
def _call_method(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def _remote_method(method, rref, *args, **kwargs):
args_tup = tuple([method, rref] + list(args))
return rpc.rpc_sync(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
def _remote_method_async(method, rref, *args, **kwargs):
args_tup = tuple([method, rref] + list(args))
return rpc.rpc_async(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
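# --- Illustrative sketch (not part of the original PyTorch sources) ---
# The two helpers above implement the usual "run a method on the RRef's owner"
# RPC pattern: (method, rref, *args) is shipped to rref.owner(), where
# method(rref.local_value(), *args, **kwargs) is executed. A hypothetical call
# site, assuming `remote_em_rref` is an RRef to a RemoteEM living on another
# worker, would look like:
#
#   out = _remote_method(RemoteEM.forward, remote_em_rref, sparse_ids)
#   fut = _remote_method_async(RemoteEM.forward, remote_em_rref, sparse_ids)
#   out = fut.wait()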
class RemoteEM(nn.Module):
def __init__(self, num_embeddings: int, embedding_dim: int):
gLogger.info(f"Initing RemoteEM with {num_embeddings} {embedding_dim}")
super(RemoteEM, self).__init__()
init_em = [0.5] * embedding_dim
self.em = nn.EmbeddingBag(
num_embeddings,
embedding_dim,
_weight=torch.tensor([init_em] * num_embeddings),
)
def forward(self, input: torch.Tensor):
gLogger.debug(f"Running RemoteEM.forward() on: {input}")
return self.em(input, offsets=torch.LongTensor(range(input.shape[0])))
# Return a linear module with predefined parameters.
def getLinear(d_in, d_out):
l = nn.Linear(d_in, d_out, bias=False)
w = torch.ones((d_out, d_in))
w[0][0] = -1
w.requires_grad_()
l.weight.data = w
return l
class RemoteNet(nn.Module):
def __init__(self, d_in: int, d_out: int):
gLogger.info(f"Initing RemoteNet with {d_in} {d_out}")
super(RemoteNet, self).__init__()
self.fc = getLinear(d_in, d_out)
self.relu = nn.ReLU()
def forward(self, input: torch.Tensor):
gLogger.debug(f"Running RemoteNet.forward() on: {input}")
return self.relu(self.fc(input))
class HybridModel(nn.Module):
def __init__(
self,
remote_em_rref: rpc.RRef,
remote_net_rref: rpc.RRef,
process_group_for_ddp: dist.ProcessGroup = None,
):
super(HybridModel, self).__init__()
self.remote_em_rref = remote_em_rref
self.remote_net_rref = remote_net_rref
self.fc1 = getLinear(D_DENSE, D_DENSE)
self.fc2 = getLinear(D_HID, D_OUT)
self.non_ddp_params = tuple(self.fc1.parameters()) + tuple(
self.fc2.parameters()
)
self.ddp_params = ()
if process_group_for_ddp is not None:
self.non_ddp_params, self.ddp_params = (
tuple(self.fc1.parameters()),
tuple(self.fc2.parameters()),
)
gLogger.info("Use DDP for the second local net.")
self.fc2 = DistributedDataParallel(
self.fc2, check_reduction=True, process_group=process_group_for_ddp
)
gLogger.info(
f"HybridModel has {len(list(self.parameters()))} groups of parameters."
)
def forward(self, input: FeatureSet):
gLogger.debug(f"Running HybridModel.forward on {input}")
sparse = _remote_method(
RemoteEM.forward, self.remote_em_rref, input.sparse_features
)
# The sparse output must have the same mini-batch size as the dense features.
assert sparse.shape[0] == input.dense_features.shape[0]
dense = self.fc1(input.dense_features)
x = torch.cat((dense, sparse), 1)
gLogger.debug(f"Concatenated feature: {x}")
x = _remote_method(RemoteNet.forward, self.remote_net_rref, x)
return self.fc2(x)
class Trainer:
def __init__(
self,
remote_em_rref: rpc.RRef,
remote_net_rref: rpc.RRef,
ddp_mode: DdpMode,
rank: int,
):
self.rank = rank
self.trainer_group = (
dist.new_group(TRAINER_RANKS)
if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE)
else None
)
self.remote_em_rref = remote_em_rref
self.remote_net_rref = remote_net_rref
self.hybrid_module = HybridModel(
self.remote_em_rref,
self.remote_net_rref,
self.trainer_group if ddp_mode in (DdpMode.INSIDE,) else None,
)
self.ddp_params, self.non_ddp_params = (
self.hybrid_module.ddp_params,
self.hybrid_module.non_ddp_params,
)
if ddp_mode == DdpMode.OUTSIDE:
gLogger.info("Wrapping the whole hybrid module into DDP.")
self.ddp_params += self.non_ddp_params
self.non_ddp_params = ()
self.hybrid_module = DistributedDataParallel(
self.hybrid_module,
check_reduction=True,
process_group=self.trainer_group,
)
gLogger.info(
f"Succeeded in creating a HybridModel instance with "
f"{len(self.ddp_params)} ddp params and {len(self.non_ddp_params)} "
f"other local params."
)
def destroy_pg(self):
if self.trainer_group:
dist.destroy_process_group(self.trainer_group)
def train_batch(
self,
mini_batch: FeatureSet,
trainer_has_less_inputs: bool,
simulate_uneven_inputs: bool,
):
grads_dict = None
if not simulate_uneven_inputs:
input_batches = [mini_batch]
else:
# Split into microbatches, and trim to simulate uneven inputs.
dense_features = mini_batch.dense_features
sparse_features = mini_batch.sparse_features
values = mini_batch.values
dense_microbatch = torch.split(dense_features, 2)
sparse_microbatch = torch.split(sparse_features, 2)
values_microbatch = torch.split(values, 2)
batches = []
for d, s, v in zip(dense_microbatch, sparse_microbatch, values_microbatch):
feature_set = FeatureSet(dense_features=d, sparse_features=s, values=v)
batches.append(feature_set)
if trainer_has_less_inputs:
input_batches = batches[: len(batches) // 2]
gLogger.info(
f"""Trainer reduced input patches from {len(batches)}
to {len(input_batches)} to simulate uneven inputs."""
)
else:
input_batches = batches
with self.hybrid_module.join() if simulate_uneven_inputs else contextlib.suppress():
for b in input_batches:
with dist_autograd.context() as context_id:
output = self.hybrid_module.forward(b)
loss = (output * mini_batch.values).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
gLogger.info(
f"Loss is {loss} for mini batch: {mini_batch}. "
f"Grads dict has {len(grads_dict)} entries: {grads_dict}"
)
return (
tuple(grads_dict[param] for param in self.ddp_params),
tuple(grads_dict[param] for param in self.non_ddp_params),
)
def get_training_examples():
n = 16
training_examples = FeatureSet(
dense_features=torch.zeros((n, D_DENSE)),
sparse_features=torch.zeros(n, dtype=torch.long),
values=torch.zeros(n),
)
idx = 0
# Every example has another one that has exactly the same features but an
# opposite value. Therefore, their grads cancel each other in all-reduce.
for value in (-1, 1):
for x in (-1.0 * value, 1.0 * value):
for y in (1.0 * value, -1.0 * value):
for z in (0, 1):
training_examples.dense_features[idx, :] = torch.tensor((x, y))
training_examples.sparse_features[idx] = z
training_examples.values[idx] = value
idx += 1
# Split the examples among NUM_TRAINERS trainers
assert 0 == (n % NUM_TRAINERS)
examples_per_trainer = int(n / NUM_TRAINERS)
return [
FeatureSet(
dense_features=training_examples.dense_features[
start : start + examples_per_trainer, :
],
sparse_features=training_examples.sparse_features[
start : start + examples_per_trainer
],
values=training_examples.values[start : start + examples_per_trainer],
)
for start in range(0, n, examples_per_trainer)
]
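# --- Illustrative sketch (not part of the original PyTorch sources) ---
# A hypothetical sanity check of the cancellation property described above: every
# example is paired with one that has identical features but the opposite value,
# so the value-weighted sum of the dense features over the full set is zero.
def _example_check_cancellation():  # hypothetical helper, never called by the test suite
    shards = get_training_examples()
    dense = torch.cat([shard.dense_features for shard in shards])
    values = torch.cat([shard.values for shard in shards])
    weighted_sum = (dense * values.unsqueeze(1)).sum(dim=0)
    return torch.allclose(weighted_sum, torch.zeros(D_DENSE))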
shutdown_signal = threading.Condition()
def set_shutdown_signal():
global shutdown_signal
with shutdown_signal:
shutdown_signal.notify()
class DdpUnderDistAutogradTest(RpcAgentTestFixture):
@property
def world_size(self) -> int:
return WORLD_SIZE
def remote_worker_name(self) -> str:
# The name has to be consistent with that in 'dist_init' decorator.
return f"worker{REMOTE_WORKER_RANK}"
def trainer_name(self, rank):
# The name has to be consistent with that in 'dist_init' decorator.
return f"worker{rank}"
def _remote_worker_process(self, ddp_mode):
gLogger.info("The remote worker is running.")
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
# new_group needs to be called on all ranks, even those not in the new group.
dist.new_group(TRAINER_RANKS)
global shutdown_signal
with shutdown_signal:
shutdown_signal.wait()
gLogger.info("Exiting remote worker.")
dist.destroy_process_group()
def _trainer_process(self, rank: int):
gLogger.info(f"Running the trainer #{rank}...")
gLogger.info(
f"Initing trainer process group by trainer #{rank} with ranks {TRAINER_RANKS}"
)
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
gLogger.info(f"Waiting for shutdown signal on trainer #{rank}...")
global shutdown_signal
with shutdown_signal:
shutdown_signal.wait()
gLogger.info(f"Exiting the trainer #{rank}...")
dist.destroy_process_group()
def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool):
gLogger.info("Running the master process...")
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
remote_em_rref = rpc.remote(
self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE)
)
remote_net_rref = rpc.remote(
self.remote_worker_name(), RemoteNet, args=(D_DENSE + D_SPARSE, D_HID)
)
gLogger.info("Created remote rrefs on master")
self.do_test_on_master(
ddp_mode, simulate_uneven_inputs, remote_em_rref, remote_net_rref
)
def do_test_on_master(
self,
ddp_mode: DdpMode,
simulate_uneven_inputs: bool,
remote_em_rref: rpc.RRef,
remote_net_rref: rpc.RRef,
):
if simulate_uneven_inputs:
gLogger.info(
"Running DDP + RPC test with simulating uneven inputs across trainers."
)
trainer_rrefs = []
for rank in TRAINER_RANKS:
trainer = self.trainer_name(rank)
trainer_rrefs.append(
rpc.remote(
trainer,
Trainer,
args=(remote_em_rref, remote_net_rref, ddp_mode, rank),
)
)
if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
# new_group needs to be called on all ranks, even those not in the new group.
dist.new_group(TRAINER_RANKS)
training_examples = get_training_examples()
for _ in range(3):
futures = []
num_trainers = len(trainer_rrefs)
for idx, trainer_rref in enumerate(trainer_rrefs):
# Half the trainers will deplete inputs earlier than the rest.
trainer_has_less_inputs = (
simulate_uneven_inputs and idx < num_trainers // 2
)
futures.append(
_remote_method_async(
Trainer.train_batch,
trainer_rref,
training_examples[idx],
trainer_has_less_inputs,
simulate_uneven_inputs,
)
)
for future in futures:
ddp_grads, non_ddp_grads = future.wait()
# When there are uneven inputs, it is not necessary that grads
# cancel each other out, since some trainers contribute 0 grad.
if not simulate_uneven_inputs:
for grad in ddp_grads:
self.assertEqual(
grad,
torch.zeros_like(grad),
msg=f"The grad for any ddp parameter should be zeros, because "
"the training examples' grads cancel each other. Received "
f"gradient {grad}",
)
for grad in non_ddp_grads:
self.assertNotEqual(
grad,
torch.zeros_like(grad),
msg="The grad for any non-ddp parameter shouldn't be zeros",
)
# Destroy process groups
for idx, trainer_rref in enumerate(trainer_rrefs):
_remote_method_async(Trainer.destroy_pg, trainer_rref).wait()
# Send shutdown signals.
for rank in TRAINER_RANKS:
trainer = self.trainer_name(rank)
rpc.rpc_sync(trainer, set_shutdown_signal, args=())
rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=())
def _do_test(self, ddp_mode, simulate_uneven_inputs=False):
if self.rank == MASTER_RANK:
self._master_process(ddp_mode, simulate_uneven_inputs)
elif self.rank == REMOTE_WORKER_RANK:
self._remote_worker_process(ddp_mode)
elif self.rank in TRAINER_RANKS:
self._trainer_process(self.rank)
else:
raise RuntimeError(f"Unknow process rank: {self.rank}")
@requires_gloo()
@dist_init
def test_backward_no_ddp(self):
self._do_test(DdpMode.NONE)
@requires_gloo()
@dist_init
def test_backward_ddp_outside(self):
self._do_test(DdpMode.OUTSIDE)
@requires_gloo()
@dist_init
def test_backward_ddp_outside_uneven_inputs(self):
self._do_test(DdpMode.OUTSIDE, simulate_uneven_inputs=True)
@requires_gloo()
@dist_init
def test_backward_ddp_inside(self):
self._do_test(DdpMode.INSIDE)
# Common utils for both CPU and CUDA test suites
class CommonDdpComparisonTest(RpcAgentTestFixture):
@property
def world_size(self) -> int:
return NUM_TRAINERS
def trainer_name(self, rank):
# The name has to be consistent with that in 'dist_init' decorator.
return f"worker{rank}"
@staticmethod
def get_remote_grads(rref, context_id):
return dist_autograd.get_gradients(context_id)[rref.local_value().weight]
class DdpComparisonTest(CommonDdpComparisonTest):
def _run_test_ddp_comparision(self, simulate_uneven_inputs=False):
gLogger.info(f"Running trainer rank: {self.rank}")
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
# Postfix file_name with "pg" since file_name is also used by RPC agent
init_method=INIT_METHOD_TEMPLATE.format(file_name=f"{self.file_name}_pg"),
world_size=self.world_size,
rank=self.rank,
)
net = nn.Linear(2, 3)
ddp_net = DistributedDataParallel(net)
# Odd ranks join early if simulate_uneven_inputs.
num_inputs = 1
if simulate_uneven_inputs:
if self.rank % 2 == 0:
num_inputs += 2
inputs_list = [torch.rand((3, 2)) for _ in range(num_inputs)]
if simulate_uneven_inputs:
gLogger.info(f"Rank {self.rank} training with {len(inputs_list)} inputs.")
# Use distributed autograd. The gradients will be in RPC context map.
grads_dict = {}
with ddp_net.join(simulate_uneven_inputs):
for i, inputs in enumerate(inputs_list):
with dist_autograd.context() as context_id:
loss = ddp_net(inputs).norm()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
gLogger.info(f"Trainer #{self.rank} got grad dict: {grads_dict}")
# Use local autograd. The gradients will be in each variable's '.grad'.
ddp_net.zero_grad()
loss = ddp_net(inputs).norm()
loss.backward()
# The gradients should be the same
for param in net.parameters():
self.assertTrue(
param in grads_dict,
msg=f"Param {param} is not in dist_auto grad dict {grads_dict} for iteration {i}",
)
self.assertEqual(
grads_dict[param],
param.grad,
msg=f"The grads for param {param} are different under local "
f"and dist autograd: {param.grad} \n---\n {grads_dict[param]} for iteration {i}",
)
dist.destroy_process_group()
@requires_gloo()
@dist_init
def test_ddp_comparison(self):
self._run_test_ddp_comparision()
@requires_gloo()
@dist_init
def test_ddp_comparison_uneven_inputs(self):
# test with simulating uneven inputs in DDP
self._run_test_ddp_comparision(simulate_uneven_inputs=True)
@requires_gloo()
@dist_init
def test_ddp_dist_autograd_sparse_grads(self):
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
model = nn.EmbeddingBag(10, 3, sparse=True)
ddp_model = DistributedDataParallel(model)
# Different inputs for each
input = torch.LongTensor(10).random_(0, 10)
offsets = torch.LongTensor([0, 4])
# Run local.
loss = ddp_model(input, offsets).sum()
loss.backward()
with dist_autograd.context() as context_id:
loss = ddp_model(input, offsets).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads_dict))
self.assertEqual(model.weight.grad, grads_dict[model.weight])
@requires_gloo()
@dist_init
def test_ddp_dist_autograd_local_vs_remote(self):
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
# Use two different remote device input strings, with and without the default
# device string "cpu", respectively.
for remote_device in ["worker0/cpu", "worker0"]:
remote_layer1 = RemoteModule(
remote_device=remote_device, module_cls=nn.Linear, args=(10, 5, False)
)
layer1 = nn.Linear(10, 5, False)
# Start with the same parameters for remote and local
layer1.weight = remote_layer1.module_rref.to_here().weight
# Run local case.
layer2 = nn.Linear(5, 1)
inputs = torch.rand((10, 10))
ddp_model = DistributedDataParallel(layer2)
loss = ddp_model(layer1(inputs)).sum()
loss.backward()
# Run remote case.
with dist_autograd.context() as context_id:
loss = ddp_model(remote_layer1(inputs)).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
dist.barrier()
self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
self.assertEqual(
layer1.weight.grad,
rpc.rpc_sync(
"worker0",
CommonDdpComparisonTest.get_remote_grads,
args=(remote_layer1.module_rref, context_id),
),
)
class CudaDdpComparisonTest(CommonDdpComparisonTest):
@skip_if_lt_x_gpu(NUM_TRAINERS)
@requires_nccl()
@dist_init
@skip_if_rocm
def test_ddp_dist_autograd_local_vs_remote_gpu(self):
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
remote_layer1 = RemoteModule(
remote_device="worker0/cpu", module_cls=nn.Linear, args=(10, 7, False)
)
layer1 = nn.Linear(10, 7, False)
# Start with the same parameters for remote and local
layer1.weight = remote_layer1.module_rref.to_here().weight
layer2 = nn.Linear(7, 5).cuda(self.rank)
ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank])
remote_layer3 = RemoteModule(
remote_device="worker0/cpu", module_cls=nn.Linear, args=(5, 3, False)
)
layer3 = nn.Linear(5, 3, False)
# Start with the same parameters for remote and local
layer3.weight = remote_layer3.module_rref.to_here().weight
layer4 = nn.Linear(3, 1).cuda(self.rank)
ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank])
# Run local case.
inputs = torch.rand((10, 10))
loss = ddp_layer4(
layer3(ddp_layer2(layer1(inputs).cuda(self.rank)).cpu()).cuda(self.rank)
).sum()
loss.backward()
# Run remote case.
with dist_autograd.context() as context_id:
loss = ddp_layer4(
remote_layer3(
ddp_layer2(remote_layer1(inputs).cuda(self.rank)).cpu()
).cuda(self.rank)
).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
dist.barrier()
self.assertEqual(
layer1.weight.grad,
rpc.rpc_sync(
"worker0",
CommonDdpComparisonTest.get_remote_grads,
args=(remote_layer1.module_rref, context_id),
),
)
self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
self.assertEqual(
layer3.weight.grad,
rpc.rpc_sync(
"worker0",
CommonDdpComparisonTest.get_remote_grads,
args=(remote_layer3.module_rref, context_id),
),
)
self.assertEqual(layer4.weight.grad, grads_dict[layer4.weight])
| pytorch-master | torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py |
pytorch-master | torch/testing/_internal/distributed/__init__.py |
|
from contextlib import contextmanager
from datetime import timedelta
from functools import (
partial,
wraps,
)
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
class MockProcessGroup(dist.ProcessGroup):
def __init__(self, rank, world):
super(MockProcessGroup, self).__init__(rank, world)
def getBackendName(self):
return "mock_process_group"
def create_mock_pg(prefix_store, rank, world_size, timeout):
return MockProcessGroup(rank, world_size)
dist.Backend.register_backend('mock_process_group', create_mock_pg)
def mock_init_dist(rank, world_size):
# !!! WARNING !!!
# Kids don't try this at home, this is a cute pile of hacks that
# depends on a small mountain of c10d internals
assert not dist.is_initialized()
store = dist.HashStore()
# Trick _store_based_barrier into believing everyone else already checked-in
# Zero is the group index
store.add(f"{c10d.STORE_BASED_BARRIER_PREFIX}:0", world_size - 1)
dist.init_process_group(
backend="mock_process_group",
rank=rank,
world_size=world_size,
store=store,
group_name="fake",
timeout=timedelta(seconds=1))
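# --- Illustrative sketch (not part of the original PyTorch sources) ---
# The store trick above works because _store_based_barrier counts check-ins under a
# per-group key: pre-adding world_size - 1 means the single real caller's own
# check-in completes the count. A hypothetical standalone illustration of the same
# counter pattern:
def _example_store_counter():  # hypothetical helper, never called by the test suite
    store = dist.HashStore()
    store.add("barrier_key", 3)         # pretend three peers already checked in
    return store.add("barrier_key", 1)  # our own check-in; returns 4, i.e. world_size reached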
@contextmanager
def with_dist(rank=0, world_size=2):
"""
Context manager that initializes c10d with a fake process group.
"""
mock_init_dist(rank=rank, world_size=world_size)
try:
yield
finally:
dist.destroy_process_group()
def with_fake_comms(func=None, rank=0, world_size=2):
"""
Function wrapper that inits a fake process group designed for testing.
Right now, only querying the world size is supported.
"""
if func is None:
return partial(with_fake_comms, rank=rank, world_size=world_size)
@wraps(func)
def wrapper(self, *args, **kwargs):
with with_dist(rank, world_size):
func(self, *args, **kwargs)
return wrapper
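# --- Illustrative sketch (not part of the original PyTorch sources) ---
# A hypothetical test showing how the helpers above are meant to be used: the
# decorator (or the context manager) installs the fake process group so that
# world-size queries work without spawning real workers. The test class and
# method names below are made up for illustration.
#
#   class ExampleTest(unittest.TestCase):
#       @with_fake_comms(rank=0, world_size=4)
#       def test_world_size_visible(self):
#           self.assertEqual(dist.get_world_size(), 4)
#
#       def test_context_manager(self):
#           with with_dist(rank=1, world_size=2):
#               self.assertEqual(dist.get_world_size(), 2)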
| pytorch-master | torch/testing/_internal/distributed/distributed_utils.py |
import torch
import torch.distributed as dist
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import (
requires_gloo,
requires_nccl,
skip_if_lt_x_gpu,
skip_if_rocm,
)
from torch.distributed.pipeline.sync import Pipe
class PipeWithDDPTest(RpcAgentTestFixture):
@property
def world_size(self) -> int:
return 2
@skip_if_lt_x_gpu(4)
@requires_nccl()
@dist_init
@skip_if_rocm
def test_basic_nccl_ckpt_never(self):
self._run_basic_test("nccl", "never")
@skip_if_lt_x_gpu(4)
@requires_nccl()
@dist_init
@skip_if_rocm
def test_basic_nccl_ckpt_never_find_unused(self):
self._run_basic_test("nccl", "never", find_unused_parameters=True)
@skip_if_lt_x_gpu(4)
@requires_nccl()
@dist_init
@skip_if_rocm
def test_basic_nccl_ckpt_always(self):
self._run_basic_test("nccl", "always", static_graph=True)
@skip_if_lt_x_gpu(4)
@requires_nccl()
@dist_init
@skip_if_rocm
def test_basic_nccl_ckpt_except_last(self):
self._run_basic_test("nccl", "except_last", static_graph=True)
@skip_if_lt_x_gpu(4)
@requires_gloo()
@dist_init
@skip_if_rocm
def test_basic_gloo_ckpt_never(self):
self._run_basic_test("gloo", "never")
@skip_if_lt_x_gpu(4)
@requires_gloo()
@dist_init
@skip_if_rocm
def test_basic_gloo_ckpt_never_find_unused(self):
self._run_basic_test("gloo", "never", find_unused_parameters=True)
@skip_if_lt_x_gpu(4)
@requires_gloo()
@dist_init
@skip_if_rocm
def test_basic_gloo_ckpt_always(self):
self._run_basic_test("gloo", "always", static_graph=True)
@skip_if_lt_x_gpu(4)
@requires_gloo()
@dist_init
@skip_if_rocm
def test_basic_gloo_ckpt_except_last(self):
self._run_basic_test("gloo", "except_last", static_graph=True)
def _run_basic_test(self, backend, checkpoint, find_unused_parameters=False, static_graph=False):
dist.init_process_group(
backend=backend,
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
# Use 4 GPUs: one pipe replica spans GPUs 0 and 1 and the other spans
# GPUs 2 and 3. The two replicas are synchronized via DDP.
fc1 = nn.Linear(16, 8, bias=False).cuda(2 * self.rank)
class MyModule(nn.Module):
def __init__(self, device):
super(MyModule, self).__init__()
self.fc2 = nn.Linear(8, 4, bias=False).cuda(device)
self.fc3 = nn.Linear(4, 2, bias=False).cuda(device)
def forward(self, inp):
if find_unused_parameters:
return self.fc2(inp)
else:
return self.fc3(self.fc2(inp))
layer2 = MyModule(2 * self.rank + 1)
model = nn.Sequential(
fc1,
layer2
)
model = Pipe(model, chunks=2, checkpoint=checkpoint)
model = DistributedDataParallel(
model,
find_unused_parameters=find_unused_parameters,
static_graph=static_graph,
)
# Ensure inputs are different across ranks to verify that gradient
# sync indeed occurs.
model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
out = model(model_input).local_value()
out.sum().backward()
# Run forward again for find_unused_parameters to trigger any potential errors.
if find_unused_parameters:
# Ensure inputs are different across ranks to verify that gradient
# sync indeed occurs.
unused_param_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
model(unused_param_input).local_value().sum().backward()
# Run a few more iterations of fwd + bwd to ensure gradient synchronization
# occurs properly across iterations via delay_all_reduce/bucketized allreduce.
for _ in range(3):
model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
out = model(model_input).local_value()
out.sum().backward()
# Check grads
output = [torch.empty_like(fc1.weight.grad), torch.empty_like(fc1.weight.grad)]
dist.all_gather(output, fc1.weight.grad)
self.assertEqual(output[0], output[1])
output = [torch.empty_like(layer2.fc2.weight.grad), torch.empty_like(layer2.fc2.weight.grad)]
dist.all_gather(output, layer2.fc2.weight.grad)
self.assertEqual(output[0], output[1])
if not find_unused_parameters:
output = [torch.empty_like(layer2.fc3.weight.grad), torch.empty_like(layer2.fc3.weight.grad)]
dist.all_gather(output, layer2.fc3.weight.grad)
self.assertEqual(output[0], output[1])
| pytorch-master | torch/testing/_internal/distributed/pipe_with_ddp_test.py |
import copy
import itertools
import math
import os
import random
import sys
import tempfile
import time
from collections import namedtuple, OrderedDict
from contextlib import contextmanager, suppress
from datetime import timedelta
from functools import reduce
from typing import Union, NamedTuple, Callable, Any
import numpy as np
import torch
import torch.cuda
import torch.distributed as dist
import torch.distributed.algorithms.model_averaging.averagers as averagers
import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD
import torch.distributed.algorithms.model_averaging.utils as model_averaging_utils
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils._stateless as _stateless
from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR
from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT
from torch.cuda.amp import GradScaler, autocast
from torch.distributed.algorithms.ddp_comm_hooks import (
post_localSGD_hook as post_localSGD,
powerSGD_hook as powerSGD,
default_hooks as default,
quantization as quantization_hooks,
)
from torch.distributed.distributed_c10d import (
get_world_size,
_get_default_group,
AllreduceOptions,
GroupMember,
)
from torch.distributed.utils import (
_verify_param_shape_across_processes,
_sync_module_states,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
TEST_SKIPS,
init_multigpu_helper,
initialize_temp_directories,
cleanup_temp_dir,
simple_sparse_reduce_tests,
skip_if_rocm,
skip_if_small_worldsize,
skip_if_odd_worldsize,
skip_if_lt_x_gpu,
nccl_skip_if_lt_x_gpu,
skip_if_no_gpu,
require_n_gpus_for_nccl_backend,
requires_nccl_version,
captured_output,
with_nccl_blocking_wait,
with_dist_debug_levels,
verify_ddp_error_logged,
DistTestCases
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_MACOS,
IS_WINDOWS,
FILE_SCHEMA,
IS_FBCODE,
NO_MULTIPROCESSING_SPAWN,
parametrize,
sandcastle_skip,
sandcastle_skip_if,
)
import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer
from torch.utils.data.distributed import DistributedSampler
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
if sys.platform == "win32":
import msvcrt
else:
import fcntl
class NetWithBuffers(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Linear(10, 10, bias=False)
self.b = nn.Linear(10, 1, bias=False)
self.register_buffer('buffer', torch.randn(1, 2))
def forward(self, x):
self.buffer.add_(1)
return self.b(self.a(x))
class Foo:
def __init__(self, x):
# Can be tensor or int
self.x = x
def __eq__(self, other):
def eq(value, other):
if isinstance(value, torch.Tensor):
return torch.equal(value, other)
return value == other
for attr, value in self.__dict__.items():
other_value = other.__dict__[attr]
if not eq(value, other_value):
return False
return True
f = Foo(10)
f.bar = 1
foo_cpu_tensor = Foo(torch.randn(3, 3))
COLLECTIVES_OBJECT_TEST_LIST = [
{"key1": 3, "key2": 4, "key3": {"nested": True}},
f,
foo_cpu_tensor,
"foo",
[1, 2, True, "string", [4, 5, "nested"]],
]
# Allowlist of distributed backends where profiling of collectives is supported.
PROFILING_SUPPORTED_BACKENDS = [
dist.Backend.NCCL,
dist.Backend.GLOO,
dist.Backend.MPI,
]
# Allowlist of distributed backends where profiling is supported with use_cuda=True
CUDA_PROFILING_SUPPORTED_BACKENDS = [
dist.Backend.GLOO,
dist.Backend.MPI,
dist.Backend.NCCL,
]
# Allowlist of distributed backends where profiling is supported for p2p ops
SEND_RECV_PROFILING_SUPPORTED_BACKENDS = [
dist.Backend.MPI,
dist.Backend.GLOO,
dist.Backend.NCCL,
]
# Dummy NamedTuple data structures to test DDP support for NamedTuple types.
EXPECTED_FIELDS = ("a", "b")
TestNamedTupleInput_0 = namedtuple("NamedTuple", EXPECTED_FIELDS)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
skipIfNoTorchVision = sandcastle_skip_if(not HAS_TORCHVISION, "no torchvision")
BACKEND = os.environ["BACKEND"]
INIT_METHOD = os.getenv("INIT_METHOD", "env://")
DEFAULT_TIMEOUT = 300
CUSTOMIZED_TIMEOUT = {"test_DistributedDataParallel": 500}
def get_profiling_event(postfix, profiler):
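    """
    Return all profiler events whose names end with ``postfix``, supporting both
    ``torch.profiler.profile`` and the legacy autograd profiler.
    """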
event_list = (
profiler.events()
if isinstance(profiler, torch.profiler.profile)
else profiler.function_events
)
return [event for event in event_list if event.name.endswith(postfix)]
# Error message substring for unfinished reductions from the prior iteration.
ddp_prev_reduction_unfinished_str = (
"Expected to have finished reduction in the prior iteration"
)
# Error message substring when find_unused_parameters=True has not been passed
ddp_recommend_find_unused_params_str = (
"passing the keyword argument `find_unused_parameters=True`"
)
# Error message substring when find_unused_parameters=True is enabled
ddp_find_unused_params_enabled_str = "Since `find_unused_parameters=True` is enabled"
# Error message substring for possibility of not all model outputs being used
# in loss computation
ddp_outputs_not_used_in_loss_str = (
"`forward` function outputs participate in calculating loss"
)
# Error message substring suggesting to use TORCH_DISTRIBUTED_DEBUG
ddp_suggest_debug_mode_str = (
"set the environment variable TORCH_DISTRIBUTED_DEBUG to either INFO or DETAIL"
)
class DDPUnevenTestInput(NamedTuple):
name: str
model: nn.Module
    inp: Union[torch.Tensor, tuple]
sync_interval: int
throw_on_early_termination: bool = False
hook: Callable = None
state: Any = None
class _FC2(nn.Module):
def __init__(self):
super(_FC2, self).__init__()
self.fc = nn.Linear(10, 50, bias=True)
self.fc.bias.requires_grad = False
def forward(self, x):
x = self.fc(x)
return x
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = _FC2()
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
)
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class LargeNet(nn.Module):
def __init__(self):
super(LargeNet, self).__init__()
self.fc1 = nn.Linear(1000, 2000, bias=False)
self.fc2 = nn.Linear(2000, 500, bias=False)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class BatchNormNet(nn.Module):
def __init__(self, affine=True):
super(BatchNormNet, self).__init__()
self.fc1 = nn.Linear(2, 40, bias=False)
self.bn = nn.BatchNorm1d(4, affine=affine)
self.fc2 = nn.Linear(40, 4, bias=False)
def forward(self, x):
x = torch.reshape(self.fc1(x), (-1, 4, 10))
x = self.bn(x)
x = torch.reshape(x, (-1, 40))
x = self.fc2(x)
return F.softmax(x, dim=1)
class UnusedParamTwoLinLayerNet(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Linear(10, 10, bias=False)
self.b = nn.Linear(10, 10, bias=False)
self.c = nn.Linear(5, 5, bias=False)
def forward(self, x):
a = self.a(x)
b = self.b(x)
return (a, b)
class DictOutputModule(nn.Module):
def __init__(self):
super().__init__()
self.module = UnusedParamTwoLinLayerNet()
def forward(self, x):
predictions = self.module(x)
loss = (predictions[0] + predictions[1]).sum()
return {
"predictions": predictions,
"loss": loss,
}
class TwoLinLayerNet(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Linear(10, 10, bias=False)
self.b = nn.Linear(10, 1, bias=False)
def forward(self, x):
a = self.a(x)
b = self.b(x)
return (a, b)
class EmbeddingNetDifferentParams(nn.Module):
"""
A module containing an embedding with different dimension or different # of
parameters depending on the rank.
"""
def __init__(self, rank, diff_num_params=False):
super().__init__()
embedding_dim = 500 if diff_num_params or rank == 0 else 50
self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim)
self.lin = nn.Linear(embedding_dim, 1)
if diff_num_params:
self.lin2 = nn.Linear(1, 1, bias=False)
def forward(self, x):
x = self.embedding(x)
return self.lin(x)
class ControlFlowToyModel(nn.Module):
def __init__(self):
super(ControlFlowToyModel, self).__init__()
self.lin1 = nn.Linear(10, 10, bias=False)
self.lin2 = nn.Linear(10, 10, bias=False)
def forward(self, x):
        # Whether the second layer is used depends on the input x.
use_second_layer = torch.equal(x, torch.ones(20, 10, device=x.device))
if use_second_layer:
return self.lin2(F.relu(self.lin1(x)))
else:
return F.relu(self.lin1(x))
DDP_NET = Net()
BN_NET = BatchNormNet()
BN_NET_NO_AFFINE = BatchNormNet(affine=False)
ONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99)
def get_timeout(test_id):
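    """
    Return the timeout (in seconds) for the given test id, using
    CUSTOMIZED_TIMEOUT when present and DEFAULT_TIMEOUT otherwise.
    """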
test_name = test_id.split(".")[-1]
if test_name in CUSTOMIZED_TIMEOUT:
return CUSTOMIZED_TIMEOUT[test_name]
else:
return DEFAULT_TIMEOUT
default_pg_timeout = 60
CUSTOM_PG_TIMEOUT = {
    # This test runs slowly and needs additional time to complete, otherwise it
    # can be taken down by NCCL_ASYNC_ERROR_HANDLING.
"test_ddp_uneven_inputs": 300,
# This test has a short timeout since it tests being taken down by
# NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly.
"test_ddp_model_diff_across_ranks": 5,
}
def require_backend(backends):
if BACKEND not in backends:
return sandcastle_skip("Test requires backend to be one of %s" % backends)
return lambda func: func
def require_backends_available(backends):
def check(backend):
if backend == dist.Backend.GLOO:
return dist.is_gloo_available()
if backend == dist.Backend.NCCL:
return dist.is_nccl_available()
if backend == dist.Backend.MPI:
return dist.is_mpi_available()
if backend in DistTestCases.backend_feature["plugin"]:
return True
return False
if not all(check(dist.Backend(backend)) for backend in backends):
return sandcastle_skip("Test requires backends to be available %s" % backends)
return lambda func: func
def require_world_size(world_size):
if int(os.environ["WORLD_SIZE"]) < world_size:
return sandcastle_skip("Test requires world size of %d" % world_size)
return lambda func: func
@contextmanager
def _lock():
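    """
    Exclusive cross-platform file lock (msvcrt on Windows, fcntl elsewhere) over
    a lockfile in TEMP_DIR; used to serialize access to the barrier directory.
    """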
TEMP_DIR = os.environ["TEMP_DIR"]
lockfile = os.path.join(TEMP_DIR, "lockfile")
with open(lockfile, "w") as lf:
try:
if sys.platform == "win32":
msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1)
yield
else:
fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
yield
finally:
if sys.platform == "win32":
msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1)
else:
fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
lf.close()
def _build_tensor(size, value=None, dtype=torch.float, device_id=None):
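    """
    Build a ``size x size x size`` tensor filled with ``value`` (defaults to
    ``size``), optionally moved to the given CUDA device.
    """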
if value is None:
value = size
if device_id is None:
return torch.empty(size, size, size, dtype=dtype).fill_(value)
else:
return torch.empty(size, size, size, dtype=dtype).fill_(value).cuda(device_id)
def _build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float):
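    """
    Build a ``dim``-dimensional tensor whose sides all have length ``dim_size``,
    filled with ``value`` (defaults to ``dim``).
    """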
if value is None:
value = dim
return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value)
def _create_autograd_profiler():
return torch.autograd.profiler.profile(record_shapes=True)
def _create_torch_profiler():
return torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
],
record_shapes=True,
)
class Barrier(object):
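    """
    File-based barrier shared across the test processes: each process writes its
    current barrier id into a per-pid file under TEMP_DIR/barrier and spins
    until enough peers have written an id at least as large, or the timeout
    expires.
    """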
barrier_id = 0
@classmethod
def init(cls):
cls.barrier_id = 0
barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier")
for f_name in os.listdir(barrier_dir):
os.unlink(os.path.join(barrier_dir, f_name))
@classmethod
def sync(cls, wait_for=None, timeout=10):
if wait_for is None:
wait_for = dist.get_world_size()
cls.barrier_id += 1
barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier")
pid = str(os.getpid())
barrier_file = os.path.join(barrier_dir, pid)
with _lock():
with open(barrier_file, "w") as f:
f.write(str(cls.barrier_id))
start_time = time.time()
while True:
arrived = 0
with _lock():
for f_name in os.listdir(barrier_dir):
with open(os.path.join(barrier_dir, f_name), "r") as f:
data = f.read()
if int(data) >= cls.barrier_id:
arrived += 1
if arrived == wait_for:
break
if time.time() - start_time > timeout:
raise RuntimeError("barrier timeout")
time.sleep(0.1)
class TestDistBackend(MultiProcessTestCase):
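    """
    Per-backend bootstrap on top of MultiProcessTestCase: sets MASTER_ADDR and
    MASTER_PORT, and has each spawned rank initialize a process group using the
    backend selected via the BACKEND environment variable before running the
    test body.
    """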
@classmethod
def setUpClass(cls):
os.environ["MASTER_ADDR"] = str(MASTER_ADDR)
os.environ["MASTER_PORT"] = str(MASTER_PORT)
super().setUpClass()
def setUp(self):
super().setUp()
# initialize temp directories
initialize_temp_directories()
# initialize Barrier
Barrier.init()
        # Skip return code checking for the following tests as they are expected to
# crash a process due to NCCL_ASYNC_ERROR_HANDLING.
self.skip_return_code_checks = []
def tearDown(self):
cleanup_temp_dir()
super().tearDown()
@property
def init_method(self):
return "{}{file_name}".format(FILE_SCHEMA, file_name=self.file_name)
@classmethod
def _run(cls, rank, test_name, file_name, pipe):
# Enable DDP + ReplicatedTensor
from torch.nn.parallel._replicated_tensor_ddp_utils import _set_ddp_with_replicated_tensor
_set_ddp_with_replicated_tensor(True)
if BACKEND == "nccl" and not torch.cuda.is_available():
sys.exit(TEST_SKIPS["no_cuda"].exit_code)
self = cls(test_name)
self.rank = rank
self.file_name = file_name
if torch.cuda.is_available() and torch.cuda.device_count() < int(
self.world_size
):
sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
try:
pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout)
timeout = timedelta(seconds=pg_timeout_seconds)
dist.init_process_group(
init_method=self.init_method,
backend=BACKEND,
world_size=int(self.world_size),
rank=self.rank,
timeout=timeout,
)
except RuntimeError as e:
if "recompile" in e.args[0]:
sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)
raise
        # Execute a barrier before running the test to ensure that every process
        # has finished initialization, so that a test exiting immediately due to
        # a skip doesn't cause flakiness.
self._barrier()
self.run_test(test_name, pipe)
self._barrier()
dist.destroy_process_group()
sys.exit(0)
    # Needed since MultiProcessTestCase assumes a world_size of 4, but we
    # run these tests under various other world sizes.
@property
def world_size(self):
return os.environ["WORLD_SIZE"]
class DistributedTest:
class _DistTestBase:
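        """
        Backend-agnostic test body. Nesting it inside ``DistributedTest`` keeps
        unittest discovery from collecting the base class directly; concrete
        per-backend test classes are expected to derive from both this class
        and ``TestDistBackend``.
        """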
def _barrier(self, *args, **kwargs):
Barrier.sync(*args, **kwargs)
def _init_group_test(self, **kwargs):
group = [1, 2]
group_id = dist.new_group(group, **kwargs)
rank = dist.get_rank()
if rank not in group:
return ([], None, rank)
return (group, group_id, rank)
def _init_full_group_test(self, **kwargs):
group = list(range(0, dist.get_world_size()))
group_id = dist.new_group(**kwargs)
rank = dist.get_rank()
return (group, group_id, rank)
def _init_global_test(self):
group = list(range(0, dist.get_world_size()))
group_id = dist.group.WORLD
rank = dist.get_rank()
return (group, group_id, rank)
def _verify_buffers_equal(self, m1, m2):
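            """
            Check that m1 and m2 hold identical buffers, and that every buffer
            is identical across all ranks.
            """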
# verify buffers across models
m1_buf_dict = {k: v for k, v in m1.module.named_buffers()}
for name, buf in m2.module.named_buffers():
self.assertEqual(buf, m1_buf_dict[name])
# Verify buffers across ranks.
m1_buffers = list(m1.buffers())
m2_buffers = list(m2.buffers())
for (buf1, buf2) in zip(m1_buffers, m2_buffers):
gathered_bufs = [
torch.empty_like(buf1) for _ in range(dist.get_world_size())
]
dist.all_gather(gathered_bufs, buf1)
gathered_bufs_m2 = [
torch.empty_like(buf2) for _ in range(dist.get_world_size())
]
for b in gathered_bufs:
self.assertEqual(b, buf1)
dist.all_gather(gathered_bufs_m2, buf2)
for b in gathered_bufs_m2:
self.assertEqual(b, buf2)
def test_dump_DDP_relevant_env_vars(self):
with captured_output() as (out, _):
_dump_DDP_relevant_env_vars()
lines = out.getvalue().splitlines()
def format_line(var):
return "env:%s=%s" % (
var,
os.environ[var] if var in os.environ else "N/A",
)
# Check relevant env vars
vars = [
"MASTER_ADDR",
"MASTER_PORT",
"WORLD_SIZE",
"NCCL_TOPO_DUMP_FILE", # N/A
"NCCL_ASYNC_ERROR_HANDLING",
]
for var in vars:
line = format_line(var)
self.assertIn(line, lines)
# Check irrelevant env vars
vars = [
"xxx",
"yyy",
"zzz",
]
for var in vars:
line = format_line(var)
self.assertNotIn(line, lines)
# GET RANK
def test_get_rank(self):
test_dir = os.path.join(os.environ["TEMP_DIR"], "test_dir")
pid = str(os.getpid())
num_processes = dist.get_world_size()
with open(os.path.join(test_dir, pid), "w") as f:
f.write(str(dist.get_rank()))
self._barrier()
all_ranks = set()
for f_name in os.listdir(test_dir):
with open(os.path.join(test_dir, f_name), "r") as f:
all_ranks.add(int(f.read()))
self.assertEqual(len(all_ranks), num_processes)
self._barrier()
if dist.get_rank() == 0:
for f_name in os.listdir(test_dir):
os.unlink(os.path.join(test_dir, f_name))
self._barrier()
def test_get_backend(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
backend_str = BACKEND.lower()
self.assertEqual(dist.get_backend(), backend_str)
if dist.get_rank() in group:
self.assertEqual(dist.get_backend(group_id), backend_str)
else:
with self.assertRaisesRegex(
RuntimeError, "Invalid process group specified"
):
dist.get_backend(group_id)
def test_Backend_enum_class(self):
# test parsing
backend = BACKEND.lower()
self.assertEqual(dist.Backend(BACKEND.upper()), backend)
self.assertEqual(dist.Backend(BACKEND), backend)
with self.assertRaisesRegex(ValueError, "Invalid backend: 'undefined'"):
dist.Backend("undefined")
with self.assertRaisesRegex(ValueError, "Invalid backend: 'xYz'"):
dist.Backend("xYz")
with self.assertRaises(ValueError):
dist.Backend(None)
with self.assertRaises(ValueError):
dist.Backend(3)
with self.assertRaises(ValueError):
dist.Backend(["gloo"])
# Test destroy
def test_destroy_group(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
self._barrier()
dist.destroy_process_group(group_id)
# Test get rank and size of group
def test_get_rank_size_group(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
if dist.get_rank() in group:
self.assertEqual(dist.get_world_size(group_id), 2)
self.assertTrue(dist.get_rank(group_id) in list(range(2)))
else:
self.assertEqual(dist.get_world_size(group_id), -1)
self.assertEqual(dist.get_rank(group_id), -1)
# Test destroy full groups
def test_destroy_full_group(self):
_, group_id, _ = self._init_full_group_test()
self._barrier()
dist.destroy_process_group(group_id)
# Test get rank and size of full group
def test_get_rank_size_full_group(self):
_, group_id, _ = self._init_full_group_test()
self.assertEqual(dist.get_world_size(group_id), dist.get_world_size())
self.assertEqual(dist.get_rank(group_id), dist.get_rank())
def _test_barrier_timeout(self, group_id, timeout):
local_rank = dist.get_rank(group_id)
            # Only execute the barrier on rank == 0, causing it to time out.
if local_rank == 0:
expected_time = time.time() + timeout.total_seconds()
# In debug mode, we execute a monitored_barrier before the
# collective, so assert on that.
if dist.get_debug_level() == dist.DebugLevel.DETAIL:
exception_ctx = self.assertRaisesRegex(
Exception, "failed to pass monitoredBarrier"
)
else:
exception_ctx = self.assertRaisesRegex(
Exception, " (Timed out|closed|timeout) "
)
with exception_ctx:
dist.barrier(group_id)
self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1)
else:
pass
@sandcastle_skip_if(BACKEND != "gloo", "Only gloo backend supports timeouts")
@sandcastle_skip_if(
not INIT_METHOD.startswith("file://"),
"Requires file:// initialization method. "
+ "Both tcp:// and env:// rely on the TCP store for which "
"reinitialization has proven racy.",
)
def test_barrier_timeout_global(self):
dist.destroy_process_group()
# Explicitly pass world size to the barrier because we've
# just destroyed any state in torch.distributed.
self._barrier(wait_for=int(os.environ["WORLD_SIZE"]))
# Reinitialize global process group
timeout = timedelta(seconds=1)
dist.init_process_group(
init_method=INIT_METHOD,
backend=BACKEND,
world_size=int(os.environ["WORLD_SIZE"]),
rank=self.rank,
timeout=timeout,
)
self._test_barrier_timeout(dist.group.WORLD, timeout)
@skip_if_small_worldsize
@sandcastle_skip_if(BACKEND != "gloo", "Only gloo backend supports timeouts")
def test_barrier_timeout_group(self):
timeout = timedelta(seconds=5)
_, group_id, _ = self._init_group_test(timeout=timeout)
if group_id is not None:
self._test_barrier_timeout(group_id, timeout)
@sandcastle_skip_if(BACKEND != "gloo", "Only gloo backend supports timeouts")
def test_barrier_timeout_full_group(self):
timeout = timedelta(seconds=1)
_, group_id, _ = self._init_full_group_test(timeout=timeout)
if group_id is not None:
self._test_barrier_timeout(group_id, timeout)
# This test helper can only be used when using the Gloo or NCCL backend
# **and** both the Gloo and NCCL backends are available.
# See the @skip annotations below.
def _test_group_override_backend(self, initializer):
if BACKEND == "gloo":
new_backend = "nccl"
elif BACKEND == "nccl":
new_backend = "gloo"
elif BACKEND in DistTestCases.backend_feature["plugin"]:
new_backend = "gloo"
group, group_id, rank = initializer(backend=new_backend)
if group_id is None:
return
if new_backend == "gloo":
self.assertTrue(isinstance(group_id, dist.ProcessGroupGloo))
if new_backend == "nccl":
self.assertTrue(isinstance(group_id, dist.ProcessGroupNCCL))
self.assertEqual(rank, group[dist.get_rank(group_id)])
self.assertEqual(len(group), dist.get_world_size(group_id))
# Pin device (so we avoid NCCL race conditions/deadlocks).
group_rank = dist.get_rank(group_id)
torch.cuda.set_device(group_rank)
# Run broadcast of CUDA tensor (so it works for both Gloo and NCCL).
tensor = _build_tensor(2, value=group_rank).cuda()
dist.broadcast(tensor, src=group[0], group=group_id)
self.assertEqual(_build_tensor(2, value=0), tensor.to("cpu"))
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@require_world_size(3)
@skip_if_lt_x_gpu(2)
def test_backend_group(self):
self._test_group_override_backend(self._init_group_test)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(3)
def test_backend_full_group(self):
self._test_group_override_backend(self._init_full_group_test)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@require_world_size(4)
@skip_if_lt_x_gpu(2)
def test_new_subgroups(self):
subgroup_size = 2
cur_subgroup, subgroups = dist.new_subgroups(subgroup_size)
world_size = dist.get_world_size()
self.assertEqual(cur_subgroup.size(), subgroup_size)
self.assertEqual(len(subgroups), world_size / subgroup_size)
self.assertFalse(dist._rank_not_in_group(cur_subgroup))
for subgroup in subgroups:
dist.destroy_process_group(subgroup)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@skip_if_no_gpu
def test_new_subgroups_group_size_exceeds_world_size(self):
with self.assertRaisesRegex(
ValueError, "The arg 'group_size' must not exceed the world size"
):
dist.new_subgroups(100)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@require_world_size(4)
@skip_if_lt_x_gpu(4)
def test_new_subgroups_world_size_not_divisible_by_group_size(self):
with self.assertRaisesRegex(
ValueError, "The world size must be divisible by 'group_size'"
):
dist.new_subgroups(3)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@require_world_size(4)
@skip_if_lt_x_gpu(4)
def test_new_subgroups_by_enumeration(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
cur_subgroup, subgroups = dist.new_subgroups_by_enumeration(
ranks_per_subgroup_list=[[0, 2], [1, 3]]
)
if device_id >= 4:
self.assertIsNone(cur_subgroup)
else:
self.assertEqual(cur_subgroup.size(), 2)
self.assertEqual(len(subgroups), 2)
if device_id == 0 or device_id == 2:
self.assertEqual(cur_subgroup, subgroups[0])
else:
self.assertEqual(cur_subgroup, subgroups[1])
for subgroup in subgroups:
dist.destroy_process_group(subgroup)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@require_world_size(4)
@skip_if_lt_x_gpu(4)
def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
world_size = get_world_size(group_id)
with self.assertRaisesRegex(
RuntimeError,
"The new group's rank should be within the the world_size set by init_process_group",
):
dist.new_subgroups_by_enumeration(
ranks_per_subgroup_list=[[0, 1], [world_size, 2]]
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@skip_if_no_gpu
def test_new_subgroups_by_enumeration_negative_input_rank(self):
group, group_id, rank = self._init_global_test()
with self.assertRaisesRegex(
RuntimeError,
"The new group's rank should be within the the world_size set by init_process_group",
):
dist.new_subgroups_by_enumeration(
ranks_per_subgroup_list=[[-1, -2], [-3, -4]]
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@require_world_size(4)
@skip_if_lt_x_gpu(4)
def test_new_subgroups_overlap_not_allowed(self):
with self.assertRaisesRegex(
ValueError, "Rank 1 has appeared in both subgroup"
):
dist.new_subgroups_by_enumeration(
ranks_per_subgroup_list=[[0], [1, 2], [1, 3]]
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@skip_if_lt_x_gpu(2)
def test_average_parameters(self):
rank = dist.get_rank()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
model = nn.Sequential(
nn.Conv2d(3, 3, kernel_size=3, padding=1),
nn.ReLU(),
nn.Linear(1, 5, bias=False),
).cuda(device_id)
# Test global model averaging
for p in model.parameters():
p.data = torch.ones_like(p.data)
model_averaging_utils.average_parameters(
params=model.parameters(), process_group=None
)
# Every element will be the same as the input.
for p in model.parameters():
self.assertEqual(p.data, torch.ones_like(p.data))
# Test partial model averaging
for p in model.parameters():
p.data = torch.ones_like(p.data) * rank
group_nccl = dist.new_group(ranks=[0, 1], backend="nccl")
model_averaging_utils.average_parameters(
params=model.parameters(), process_group=group_nccl
)
if not dist._rank_not_in_group(group_nccl):
# Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5.
for p in model.parameters():
self.assertEqual(p.data, torch.ones_like(p.data) * 0.5)
else:
# Every element on device not in the subgroup should remain the same.
for p in model.parameters():
self.assertEqual(p.data, torch.ones_like(p.data) * rank)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@skip_if_lt_x_gpu(2)
def test_periodic_model_averager(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
device_id = rank_to_GPU[rank][0]
model = nn.Linear(1, 5, bias=False).cuda(device_id)
param = next(model.parameters())
tensor = torch.ones_like(param.data) * rank
expected_avg_tensor = (
torch.ones_like(param.data) * sum(range(world_size)) / world_size
)
period = 4
for warmup_steps in [12, 13, 14, 15]:
averager = averagers.PeriodicModelAverager(period=period, warmup_steps=warmup_steps)
for step in range(0, 20):
# Reset the parameters at every step.
param.data = copy.deepcopy(tensor)
for params in model.parameters():
# mock grad
params.grad = torch.ones_like(param.data)
averager.average_parameters(model.parameters())
if step >= warmup_steps and (step - warmup_steps) % period == 0:
self.assertEqual(param.data, expected_avg_tensor)
else:
# No model averaging, so the parameters are not updated.
self.assertEqual(param.data, tensor)
@skip_if_lt_x_gpu(2)
def test_periodic_model_averager_param_group(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
device_id = rank_to_GPU[rank][0]
model = nn.Linear(1, 5, bias=False).cuda(device_id)
param = next(model.parameters())
opt = torch.optim.SGD(model.parameters(), lr=0.1)
period = 4
for warmup_steps in [12, 13, 14, 15]:
averager = averagers.PeriodicModelAverager(period=period, warmup_steps=warmup_steps)
for step in range(0, 20):
# Reset the parameters at every step.
for param_group in opt.param_groups:
for params in param_group["params"]:
# mock grad
params.grad = torch.ones_like(param.data) * rank
params.data = torch.ones_like(param.data) * rank
averager.average_parameters(opt.param_groups)
if step >= warmup_steps and (step - warmup_steps) % period == 0:
for param_group in opt.param_groups:
for params in param_group["params"]:
if params.grad is None:
continue
self.assertEqual(param.data, torch.ones_like(param.data) * sum(range(world_size)) / world_size)
else:
# No model averaging, so the parameters are not updated.
for param_group in opt.param_groups:
for params in param_group["params"]:
if params.grad is None:
continue
self.assertEqual(param.data, torch.ones_like(param.data) * rank)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@skip_if_lt_x_gpu(2)
def test_1_level_hierarchical_model_averager_equivalent_to_periodic_model_averager(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
device_id = rank_to_GPU[rank][0]
model = nn.Linear(1, 5, bias=False).cuda(device_id)
param = next(model.parameters())
tensor = torch.ones_like(param.data) * rank
expected_avg_tensor = (
torch.ones_like(param.data) * sum(range(world_size)) / world_size
)
period = 4
for warmup_steps in [12, 13, 14, 15]:
averager = hierarchicalSGD.HierarchicalModelAverager(
# Run the global averaging at a period of 4,
# which is equivalent to the above periodic model averaging test case.
period_group_size_dict=OrderedDict([(period, world_size)]), warmup_steps=warmup_steps
)
for step in range(0, 20):
# Reset the parameters at every step.
param.data = copy.deepcopy(tensor)
for params in model.parameters():
# mock grad
params.grad = torch.ones_like(param.data)
averager.average_parameters(model.parameters())
if step >= warmup_steps and (step - warmup_steps) % period == 0:
self.assertEqual(param.data, expected_avg_tensor)
else:
# No model averaging, so the parameters are not updated.
self.assertEqual(param.data, tensor)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["subgroup"],
f"The {BACKEND} backend does not support creating subgroups on CUDA devices"
)
@require_world_size(4)
@skip_if_lt_x_gpu(4)
def test_3_level_hierarchical_model_averager(self):
from torch.distributed.distributed_c10d import _pg_group_ranks
rank = dist.get_rank()
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
device_id = rank_to_GPU[rank][0]
model = nn.Linear(1, 5, bias=False).cuda(device_id)
param = next(model.parameters())
tensor = torch.ones_like(param.data) * rank
# Set up such a hierarchical model averaging as follows:
# after the first 10 warmup steps,
# run model averaging every 2 steps within each subgroup of size 2,
            # run model averaging every 4 steps within each subgroup of size 4,
# and run the global model averaging every 8 steps.
# If there is a conflict in model averaging at a step, only run the highest-level model averaging.
warmup_steps = 10
subgroup_size1 = 2
subgroup_avg_period1 = 2
subgroup_size2 = 4
subgroup_avg_period2 = 4
global_avg_period = 8
period_group_size_dict = OrderedDict(
[(subgroup_avg_period1, subgroup_size1),
(subgroup_avg_period2, subgroup_size2),
(global_avg_period, world_size)])
averager = hierarchicalSGD.HierarchicalModelAverager(
period_group_size_dict=period_group_size_dict, warmup_steps=warmup_steps
)
subgroup1 = averager.period_process_group_dict[subgroup_avg_period1]
subgroup2 = averager.period_process_group_dict[subgroup_avg_period2]
real_group_ranks_res1 = list(_pg_group_ranks[subgroup1].keys())
real_group_ranks_res2 = list(_pg_group_ranks[subgroup2].keys())
expect_group_ranks_res1 = (rank // subgroup_size1 * subgroup_size1 + np.array(list(range(subgroup_size1)))).tolist()
expect_group_ranks_res2 = (rank // subgroup_size2 * subgroup_size2 + np.array(list(range(subgroup_size2)))).tolist()
self.assertEqual(real_group_ranks_res1, expect_group_ranks_res1)
self.assertEqual(real_group_ranks_res2, expect_group_ranks_res2)
expected_avg_tensor_within_subgroup1 = (
torch.ones_like(param.data) * sum(real_group_ranks_res1) / subgroup_size1
)
expected_avg_tensor_within_subgroup2 = (
torch.ones_like(param.data) * sum(real_group_ranks_res2) / subgroup_size2
)
expected_global_avg_tensor = (
torch.ones_like(param.data) * sum(range(world_size)) / world_size
)
for step in range(0, 25):
# Reset the parameters at every step.
param.data = copy.deepcopy(tensor)
for params in model.parameters():
# mock grad
params.grad = torch.ones_like(param.data)
averager.average_parameters(model.parameters())
if step == 16 or step == 24:
# Run global model averaging when `step` can be divided by 8.
self.assertEqual(param.data, expected_global_avg_tensor)
elif step == 12 or step == 20:
# Run model averaging within subgroup when `step` can be divided by 4 but not by 8.
self.assertEqual(param.data, expected_avg_tensor_within_subgroup2)
elif step == 10 or step == 14 or step == 18 or step == 22:
# Run model averaging within subgroup when `step` can be divided by 2 but not by 4 or 8.
self.assertEqual(param.data, expected_avg_tensor_within_subgroup1)
else:
# No model averaging, so the parameters are not updated.
self.assertEqual(param.data, tensor)
# NCCL Batch SEND RECV
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_nccl(self):
self._barrier()
rank = dist.get_rank()
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
p2p_op_list = []
recv_tensors = [None for _ in range(world_size)]
expected_tensors = [None for _ in range(world_size)]
for val in ["1", "0"]:
os.environ["NCCL_BLOCKING_WAIT"] = val
for src in range(0, world_size):
send_tensor = _build_tensor(rank + 1, device_id=device_id).fill_(src)
recv_tensors[src] = _build_tensor(src + 1, value=-1, device_id=device_id).fill_(-1)
expected_tensors[src] = _build_tensor(src + 1, value=-1, device_id=device_id).fill_(rank)
recv_op = dist.P2POp(dist.irecv, recv_tensors[src], src)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, src)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
for src in range(0, world_size):
self.assertEqual(recv_tensors[src], expected_tensors[src])
self._barrier()
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_ring_exchange_nccl(self):
self._barrier()
rank = dist.get_rank()
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
p2p_op_list = []
send_tensor = _build_tensor(world_size, device_id=device_id)
recv_tensor = _build_tensor(world_size, value=-1, device_id=device_id)
send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1) % world_size)
recv_op = dist.P2POp(dist.irecv, recv_tensor, (rank - 1 + world_size) % world_size)
reqs = dist.batch_isend_irecv([send_op, recv_op])
for req in reqs:
req.wait()
self._barrier()
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_self_nccl(self):
self._barrier()
# Ensure the process group has been fully initialized (needed by
# the first sub-group batch_isend_irecv call)
dist.barrier()
rank = dist.get_rank()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
p2p_op_list = []
if rank == 0:
send_tensor = _build_tensor(rank + 1, device_id=device_id)
recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id)
recv_op = dist.P2POp(dist.irecv, recv_tensor, 0)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, 0)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
self._barrier()
@skip_if_no_gpu
@skip_if_small_worldsize
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_no_rank_zero_nccl(self):
self._barrier()
# Ensure the process group has been fully initialized (needed by
# the first sub-group batch_isend_irecv call)
dist.barrier()
rank = dist.get_rank()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
p2p_op_list = []
if rank == 1:
peer = 2
elif rank == 2:
peer = 1
if rank in [1, 2]:
send_tensor = _build_tensor(rank + 1, device_id=device_id)
recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id)
recv_op = dist.P2POp(dist.irecv, recv_tensor, peer)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, peer)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
self._barrier()
# GLOO Batch SEND RECV CPU
@sandcastle_skip_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU")
def test_batch_isend_irecv_gloo(self):
self._barrier()
rank = dist.get_rank()
p2p_op_list = []
for src in range(0, dist.get_world_size()):
if src == rank:
continue
send_tensor = _build_tensor(rank + 1)
recv_tensor = _build_tensor(src + 1, value=-1)
recv_op = dist.P2POp(dist.irecv, recv_tensor, src)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, src)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
self._barrier()
# GLOO Batch SEND RECV CPU with provided tags
@sandcastle_skip_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU")
def test_batch_isend_irecv_gloo_tags(self):
self._barrier()
rank = dist.get_rank()
p2p_op_list = []
for src in range(0, dist.get_world_size()):
if src == rank:
continue
send_tensor = _build_tensor(rank + 1)
recv_tensor = _build_tensor(src + 1, value=-1)
recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
self._barrier()
# NCCL Batch SEND RECV Tensor Error
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_tensor_err(self):
self._barrier()
rank = dist.get_rank()
if rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Tensors must be CUDA and dense"
):
send_tensor = _build_tensor(rank + 1)
send_op = dist.P2POp(dist.isend, send_tensor, 1)
dist.batch_isend_irecv([send_op])
# NCCL Batch SEND RECV Op Error
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_op_err(self):
self._barrier()
rank = dist.get_rank()
if rank == 0:
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
with self.assertRaisesRegex(RuntimeError, "^Invalid ``op``"):
send_tensor = _build_tensor(rank + 1, device_id=device_id)
send_op = dist.P2POp(dist.broadcast, send_tensor, 1)
dist.batch_isend_irecv([send_op])
# NCCL Batch SEND RECV p2p_op_list Error
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_op_list_err(self):
self._barrier()
rank = dist.get_rank()
if rank == 0:
with self.assertRaisesRegex(RuntimeError, "^Invalid ``p2p_op_list``"):
dist.batch_isend_irecv([1, 2])
# NCCL Batch SEND RECV Mixed Backend Error
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_mixed_backend_err(self):
self._barrier()
rank = dist.get_rank()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
group_gloo = dist.new_group(ranks=[0, 1], backend="gloo")
group_nccl = dist.new_group(ranks=[0, 1], backend="nccl")
if rank == 0:
with self.assertRaisesRegex(
RuntimeError, "All ops need to use the same group"
):
send_tensor = _build_tensor(rank + 1)
send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo)
send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl)
dist.batch_isend_irecv([send_op_gloo, send_op_nccl])
# NCCL SEND RECV
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def _test_send_recv_nccl(self, profiler_ctx=None):
            # TODO: now that NCCL send/recv is supported, there does not seem to
            # be a need to test NCCL send/recv separately.
rank = dist.get_rank()
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
tensor = _build_tensor(rank + 1, device_id=device_id)
profiler_cls = profiler_ctx if profiler_ctx is not None else suppress()
with profiler_cls as prof:
for src in range(0, world_size):
if src == rank:
# Send mode
for dst in range(0, world_size):
if dst == rank:
continue
dist.send(tensor, dst)
else:
# Recv mode
expected_tensor = _build_tensor(src + 1)
output_tensor = _build_tensor(
src + 1, value=-1, device_id=device_id
)
dist.recv(output_tensor, src)
self.assertEqual(output_tensor, expected_tensor)
self._barrier()
if profiler_ctx is not None:
backend = dist.get_backend()
if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
for event_name in [f"{backend}:send", f"{backend}:recv"]:
events = get_profiling_event(event_name, prof)
self.assertTrue(events)
# Event order is not deterministic, so simply assert their shape
# is found in the following list.
expected_shapes = [
[[rank + 1] * 3] for rank in range(dist.get_world_size())
]
for event in events:
self.assertTrue(event.input_shapes in expected_shapes)
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_send_recv_nccl(self):
self._test_send_recv_nccl()
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
def test_send_recv_nccl_autograd_profiler(self):
profiler_ctx = torch.autograd.profiler.profile(record_shapes=True)
self._test_send_recv_nccl(profiler_ctx)
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND != "nccl", "NCCL Send Recv Only")
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv")
@sandcastle_skip_if(IS_FBCODE, "Kineto in fbcode causes hang")
@sandcastle_skip_if(
IS_MACOS or IS_WINDOWS,
"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
)
def test_send_recv_nccl_torch_profiler(self):
profiler_ctx = torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA,
],
record_shapes=True,
)
self._test_send_recv_nccl(profiler_ctx)
# SEND RECV
def _test_send_recv(self, profiler_ctx):
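            """
            Each rank sends a tensor built with ``_build_tensor(rank + 1)`` to
            every other rank while receiving the corresponding tensors from its
            peers, optionally under a profiler whose send/recv events are then
            validated.
            """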
rank = dist.get_rank()
send_size = rank + 1
tensor = _build_tensor(send_size)
ctx = profiler_ctx if profiler_ctx is not None else suppress()
with ctx as prof:
for src in range(0, dist.get_world_size()):
if src == rank:
# Send mode
for dst in range(0, dist.get_world_size()):
if dst == rank:
continue
dist.send(tensor, dst)
else:
# Recv mode
recv_size = src + 1
expected_tensor = _build_tensor(recv_size)
output_tensor = _build_tensor(recv_size, value=-1)
dist.recv(output_tensor, src)
self.assertEqual(output_tensor, expected_tensor)
if profiler_ctx is not None:
backend = dist.get_backend()
if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
for event_name in [f"{backend}:send", f"{backend}:recv"]:
events = get_profiling_event(event_name, prof)
# Each rank sends/recvs from all other ranks.
event_count = sum(e.count for e in events)
expected_event_count = dist.get_world_size() - 1
self.assertEqual(event_count, expected_event_count)
# Event order is not deterministic, so simply assert their shape
# is found in the following list.
expected_shapes = [
[[rank + 1] * 3] for rank in range(dist.get_world_size())
]
for event in events:
self.assertTrue(event.is_async)
self.assertTrue(event.input_shapes in expected_shapes)
@sandcastle_skip_if(
BACKEND == "nccl", "Nccl send/recv tested by test_send_recv_nccl"
)
def test_send_recv(self):
self._test_send_recv(profiler_ctx=None)
@sandcastle_skip_if(
BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
)
def test_send_recv_autograd_profiler(self):
autograd_profiler_ctx = _create_autograd_profiler()
self._test_send_recv(profiler_ctx=autograd_profiler_ctx)
@sandcastle_skip_if(
BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
)
@sandcastle_skip_if(IS_FBCODE, "Kineto in fbcode causes hang")
@sandcastle_skip_if(
IS_MACOS or IS_WINDOWS,
"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
)
def test_send_recv_torch_profiler(self):
torch_profiler_ctx = _create_torch_profiler()
return self._test_send_recv(profiler_ctx=torch_profiler_ctx)
# SEND RECV ANY SOURCE
def _test_send_recv_any_source(self, profiler_ctx):
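            """
            Receive from any source (via both recv and irecv) while every peer
            sends twice, then all_gather the observed sender ranks to check
            that the global send/recv counts match.
            """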
rank = dist.get_rank()
send_recv_size = 10
tensor = _build_tensor(send_recv_size, value=rank)
recv_ranks = list()
irecv_ranks = list()
ctx = profiler_ctx if profiler_ctx is not None else suppress()
with ctx as prof:
for dst in range(0, dist.get_world_size()):
if dst == rank:
# Recv mode
for dst in range(0, dist.get_world_size()):
if dst == rank:
continue
for recv in ["recv", "irecv"]:
output_tensor = _build_tensor(send_recv_size, value=-1)
if recv == "recv":
sender = dist.recv(output_tensor)
recv_ranks.append(sender)
elif recv == "irecv":
work = dist.irecv(output_tensor)
work.wait()
sender = work._source_rank()
irecv_ranks.append(sender)
                                # Assert that the scalar value ``sender``, which
                                # should equal the sending rank, matches every
                                # value in the received tensor.
self.assertTrue(output_tensor.eq(sender).all())
else:
# Send mode
dist.send(tensor, dst) # recv
dist.send(tensor, dst) # irecv
if profiler_ctx is not None:
backend = dist.get_backend()
if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
for event_name in [f"{backend}:send", f"{backend}:recvAnySource"]:
events = get_profiling_event(event_name, prof)
# Each rank sends/recvs from other rank twice.
self.assertEqual(
sum(event.count for event in events),
2 * (dist.get_world_size() - 1),
)
for event in events:
self.assertTrue(event.is_async)
self.assertEqual(event.input_shapes, [[send_recv_size] * 3])
            # Each rank issues 2 * (world_size - 1) sends; verify that globally the
            # same number of receives is observed on the other end.
recv_ranks_tensor = torch.cat(
(torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0
)
global_recv_ranks = [
torch.empty_like(recv_ranks_tensor)
for _ in range(dist.get_world_size())
]
dist.all_gather(global_recv_ranks, recv_ranks_tensor)
global_recv_ranks_list = []
for tensor in global_recv_ranks:
global_recv_ranks_list += tensor.tolist()
from itertools import groupby
global_recv_ranks_list.sort()
frequency = [
len(list(group)) for key, group in groupby(global_recv_ranks_list)
]
self.assertEqual(dist.get_world_size(), len(frequency))
self.assertEqual(
[2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency
)
self._barrier()
@sandcastle_skip_if(
BACKEND in DistTestCases.skip_collective["sendrecv anysource"], f"{BACKEND} does not support send/recv from any source"
)
def test_send_recv_any_source(self):
self._test_send_recv_any_source(profiler_ctx=None)
@sandcastle_skip_if(
BACKEND in DistTestCases.skip_collective["sendrecv anysource"], f"{BACKEND} does not support send/recv from any source"
)
def test_send_recv_any_source_autograd_profiler(self):
autograd_profiler_ctx = _create_autograd_profiler()
self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx)
@sandcastle_skip_if(
BACKEND in DistTestCases.skip_collective["sendrecv anysource"], f"{BACKEND} does not support send/recv from any source"
)
@sandcastle_skip_if(IS_FBCODE, "Kineto in fbcode code causes hang")
@sandcastle_skip_if(
IS_MACOS or IS_WINDOWS,
"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
)
def test_send_recv_any_source_torch_profiler(self):
torch_profiler_ctx = _create_torch_profiler()
return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx)
# SEND RECV WITH TAG
def _test_send_recv_with_tag(self, profiler_ctx):
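            """
            Same send/recv exchange as above, but each message is tagged with
            the sender's rank and received with a matching tag.
            """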
rank = dist.get_rank()
world_size = dist.get_world_size()
send_recv_size = 10
tensor = _build_tensor(send_recv_size, value=rank)
ctx = profiler_ctx if profiler_ctx is not None else suppress()
with ctx as prof:
for dst in range(0, world_size):
if dst == rank:
# Recv mode
for src in range(0, world_size):
if src == rank:
continue
output_tensor = _build_tensor(send_recv_size, value=-1)
dist.recv(output_tensor, src, tag=src)
self.assertTrue(output_tensor.eq(src).all())
else:
# Send mode
dist.send(tensor, dst, tag=rank)
if profiler_ctx is not None:
backend = dist.get_backend()
if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
for event_name in [f"{backend}:send", f"{backend}:recv"]:
events = get_profiling_event(event_name, prof)
# Each rank sends/recvs from all other ranks
event_count = sum(e.count for e in events)
expected_event_count = dist.get_world_size() - 1
self.assertEqual(event_count, expected_event_count)
for event in events:
self.assertTrue(event.is_async)
self.assertEqual(event.name, event_name)
self.assertEqual(event.input_shapes, [[send_recv_size] * 3])
@sandcastle_skip_if(
BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
)
def test_send_recv_with_tag(self):
self._test_send_recv_with_tag(profiler_ctx=None)
@sandcastle_skip_if(
BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
)
def test_send_recv_with_tag_autograd_profiler(self):
autograd_profiler_ctx = _create_autograd_profiler()
return self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx)
@sandcastle_skip_if(
BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl"
)
@sandcastle_skip_if(IS_FBCODE, "Kineto in fbcode code causes hang")
@sandcastle_skip_if(
IS_MACOS or IS_WINDOWS,
"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
)
def test_send_recv_with_tag_torch_profiler(self):
torch_profiler_ctx = _create_torch_profiler()
return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx)
# ISEND
def _test_isend(self, profiler_ctx):
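            """
            Rank 0 posts non-blocking sends (one per destination rank) and waits
            on the requests; every other rank performs a blocking receive from
            rank 0, optionally under a profiler.
            """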
rank = dist.get_rank()
world_size = dist.get_world_size()
ctx = profiler_ctx if profiler_ctx is not None else suppress()
with ctx as prof:
if rank == 0:
requests = [
dist.isend(_build_tensor(dest, 10), dest)
for dest in range(1, world_size)
]
for request in requests:
request.wait()
self.assertTrue(request.is_completed())
else:
tensor = _build_tensor(rank, -1)
dist.recv(tensor, 0)
self.assertEqual(tensor, _build_tensor(rank, 10))
self._barrier()
if profiler_ctx is not None:
backend = dist.get_backend()
if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:
expected_event_name = (
f"{backend}:send" if rank == 0 else f"{backend}:recv"
)
events = get_profiling_event(expected_event_name, prof)
event_count = sum(e.count for e in events)
expected_count = dist.get_world_size() - 1 if rank == 0 else 1
self.assertEqual(expected_count, event_count)
# Event ordering is not guaranteed, so simply ensure the shapes are
# found in the following map.
expected_shapes = {
r: [[r] * 3] for r in range(1, dist.get_world_size())
}
for event in events:
self.assertTrue(event.is_async)
self.assertEqual(event.name, expected_event_name)
if rank == 0:
self.assertTrue(
event.input_shapes in expected_shapes.values()
)
else:
self.assertEqual(event.input_shapes, expected_shapes[rank])
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support isend")
def test_isend(self):
self._test_isend(profiler_ctx=None)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support isend")
def test_isend_autograd_profiler(self):
autograd_profiler_ctx = _create_autograd_profiler()
self._test_isend(profiler_ctx=autograd_profiler_ctx)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support isend")
@sandcastle_skip_if(IS_FBCODE, "Kineto in fbcode code causes hang")
@sandcastle_skip_if(
IS_MACOS or IS_WINDOWS,
"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
)
def test_isend_torch_profiler(self):
torch_profiler_ctx = _create_torch_profiler()
self._test_isend(profiler_ctx=torch_profiler_ctx)
# IRECV
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support irecv")
def test_irecv(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
if rank == 0:
expected_tensors = [
_build_tensor(src, -1) for src in range(1, world_size)
]
requests = [
dist.irecv(expected_tensors[src - 1], src)
for src in range(1, world_size)
]
for src in range(1, world_size):
requests[src - 1].wait()
self.assertTrue(requests[src - 1].is_completed())
self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))
else:
tensor = _build_tensor(rank, 10)
dist.send(tensor, 0)
self._barrier()
# BROADCAST
def _test_broadcast_helper(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
with_options=False,
):
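            """
            Broadcast tensors of several dtypes from every rank in ``group`` and
            verify that receivers observe the expected values; ``with_options``
            exercises the lower-level ``group_id.broadcast`` call with explicit
            BroadcastOptions.
            """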
for dtype, value, requires_cuda in [
(torch.float, -1e-10, False),
(torch.double, -1e-100, False),
(torch.half, -0.1, True),
(torch.int8, -2, False),
(torch.uint8, 129, False),
(torch.int, -1e5, False),
(torch.long, -1e15, False),
]:
if requires_cuda and not cuda:
continue
for src in group:
expected_tensor = _build_tensor(src + 1, value, dtype)
if cuda:
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
if rank == src:
if with_options:
opts = dist.BroadcastOptions()
opts.rootTensor = 0
opts.rootRank = src
self.call_dist_op(
":broadcast",
True,
group_id.broadcast,
[expected_tensor],
opts,
)
else:
self.call_dist_op(
":broadcast",
False,
dist.broadcast,
expected_tensor,
src,
group_id,
)
else:
tensor = _build_tensor(src + 1, -1, dtype)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
if with_options:
opts = dist.BroadcastOptions()
opts.rootTensor = 0
opts.rootRank = src
self.call_dist_op(
":broadcast", True, group_id.broadcast, [tensor], opts
)
else:
self.call_dist_op(
":broadcast",
False,
dist.broadcast,
tensor,
src,
group_id,
)
self.assertEqual(tensor.size(), expected_tensor.size())
self.assertEqual(
tensor.ne(expected_tensor).max(), torch.tensor(False)
)
self._barrier()
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast(self):
group, group_id, rank = self._init_global_test()
self._test_broadcast_helper(group, group_id, rank)
@sandcastle_skip_if(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and Nccl backend supports CUDA allReduce",
)
@skip_if_no_gpu
def test_broadcast_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast_group(self):
group, group_id, rank = self._init_group_test()
self._test_broadcast_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_broadcast_helper(group, group_id, rank)
@sandcastle_skip_if(
BACKEND != "nccl",
"Only NCCL backend supports high priority stream",
)
@skip_if_no_gpu
def test_nccl_high_priority_stream(self):
group, _, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
new_port = str(MASTER_PORT + 1)
os.environ["MASTER_PORT"] = new_port
gen_iterator = dist.rendezvous("env://", rank, dist.get_world_size())
store, rank, size = next(gen_iterator)
store = dist.PrefixStore(new_port, store)
opts = dist.ProcessGroupNCCL.Options()
opts.is_high_priority_stream = False
group_id = dist.ProcessGroupNCCL(store, rank, size, opts)
self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True)
# REDUCE
def _test_reduce_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
cuda=False,
rank_to_GPU=None,
):
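            """
            Reduce a tensor onto each ``src`` in turn with ``op`` and verify
            that only the destination rank ends up with ``expected_value``.
            """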
for src in group:
tensor = _build_tensor(src + 1).fill_(
master_value if rank == src else worker_value
)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
self.call_dist_op(
":reduce",
False,
dist.reduce,
tensor,
src,
op,
group_id,
tensor_shapes=[tensor.shape],
)
if rank == src:
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
self._barrier()
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
def test_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA reduce")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
@skip_if_no_gpu
def test_reduce_sum_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + 10 * (len(group) - 1),
True,
rank_to_GPU,
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
def test_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
def test_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
def test_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
@skip_if_small_worldsize
def test_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
@skip_if_small_worldsize
def test_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
@skip_if_small_worldsize
def test_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
@skip_if_small_worldsize
def test_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
def test_reduce_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
def test_reduce_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
def test_reduce_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
def test_reduce_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
# REDUCE TWICE
def _test_reduce_twice_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
cuda=False,
rank_to_GPU=None,
):
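            # Same pattern as _test_reduce_helper, but issues two reduce calls
            # back to back (the second via secondary_op_call) to exercise
            # consecutive collectives on the same process group.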
for src in group:
tensors = [
_build_tensor(src + 1).fill_(
master_value if rank == src else worker_value
)
for i in range(2)
]
if cuda:
for i in range(2):
tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0])
self.call_dist_op(
":reduce",
False,
dist.reduce,
tensors[0],
src,
op,
group_id,
secondary_op_call=lambda: dist.reduce(
tensors[1], src, op, group_id
),
tensor_shapes=[tensors[0].shape],
)
if rank == src:
for tensor in tensors:
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
self._barrier()
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
def test_reduce_sum_twice(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_twice_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA reduce")
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce")
@skip_if_no_gpu
def test_reduce_sum_cuda_twice(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
self._test_reduce_twice_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + 10 * (len(group) - 1),
True,
rank_to_GPU,
)
@skip_if_no_gpu
@require_backend(DistTestCases.backend_feature["gpu"])
def test_all_reduce_result_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
for src in group:
if rank == src:
tensor = _build_tensor(src + 1, 2)
else:
tensor = _build_tensor(src + 1, 10)
tensor = tensor.cuda(rank_to_GPU[rank][0])
opts = AllreduceOptions()
opts.reduceOp = dist.ReduceOp.SUM
if group_id == GroupMember.WORLD:
work = _get_default_group().allreduce([tensor], opts)
else:
work = group_id.allreduce([tensor], opts)
if BACKEND == "gloo":
                    # Calling result() before the work has finished should throw
                    # an exception. There is a race condition here, though: we
                    # cannot assume the work is still unfinished by the time we
                    # reach the next lines.
try:
with self.assertRaisesRegex(
RuntimeError,
"Work needs to be completed before calling result",
):
work.result()
except AssertionError:
# Exception was not raised, ensure is_completed()
self.assertTrue(work.is_completed())
work.wait()
result = work.result()
else:
# In case of NCCL we should be able to retrieve pointer to the result
# even before work is finished.
result = work.result()
work.wait()
expected_value = 2 + (10 * (len(group) - 1))
self.assertEqual(result, [_build_tensor(src + 1, expected_value)])
self._barrier()
def call_dist_op(
self,
profiling_title_postfix,
is_async,
op,
*args,
expect_event=True,
secondary_op_call=None,
profile_cuda=False,
tensor_shapes=None,
**kwargs,
):
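            # Runs the collective (plus an optional secondary call) under the
            # autograd profiler. For async ops it waits on the returned work
            # handles; for backends in PROFILING_SUPPORTED_BACKENDS it then
            # checks that one async profiler event per op call was recorded
            # with the expected input shapes (checks are relaxed in DETAIL
            # debug mode, where the process-group wrapper issues extra
            # collectives under the hood).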
op_calls = [lambda: op(*args, **kwargs)]
if secondary_op_call is not None:
op_calls.append(secondary_op_call)
autograd_profiler_ctx = torch.autograd.profiler.profile(
use_cuda=profile_cuda, record_shapes=True
)
# TODO: move this test to use torch.profiler once kineto issues are
# fixed internally.
with autograd_profiler_ctx as prof:
works = [op_call() for op_call in op_calls]
if is_async:
for work in works:
work.wait()
if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS:
                # We are only interested in the backend's implementation, not the dispatcher wrapper.
events = get_profiling_event(
dist.get_backend() + profiling_title_postfix, autograd_profiler_ctx
)
# DETAIL debug mode can use a pg wrapper that issues more collectives
# under the hood
if dist.get_debug_level() != dist.DebugLevel.DETAIL:
self.assertEqual(len(events), len(op_calls))
for e in events:
self.assertTrue(e.is_async)
self.assertEqual(e.count, 1)
self.assertGreaterEqual(e.cpu_time, 0)
# Verify tensor shapes if given
# DETAIL debug mode can use a pg wrapper that issues more collectives
# under the hood
if (
tensor_shapes is not None
and dist.get_debug_level() != dist.DebugLevel.DETAIL
):
self.assertEqual(
e.input_shapes,
tensor_shapes,
f"event shape: {e.input_shapes} vs tensor {tensor_shapes}",
)
# ALL REDUCE
def _test_all_reduce_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
cuda=False,
rank_to_GPU=None,
dtype=torch.float,
async_op=False,
):
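            # Same master/worker fill pattern as _test_reduce_helper, except
            # that the result is identical on every rank after all_reduce.
            # For src == 0 on CUDA-profiling-capable backends, the collective
            # is additionally run a second time under the CUDA profiler.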
for src in group:
curr_value = master_value if rank == src else worker_value
tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
if tensor.dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(tensor).shape]
else:
tensor_shapes = [tensor.shape]
self.call_dist_op(
":all_reduce",
async_op,
dist.all_reduce,
tensor,
op,
group_id,
async_op=async_op,
tensor_shapes=tensor_shapes,
                )
                # Verify the reduced value on every rank before the extra
                # profiling run below touches the tensor again.
                self.assertEqual(
                    tensor, _build_tensor(src + 1, expected_value, dtype=dtype)
                )
                # Currently, only the Gloo backend has CUDA profiling tested.
                # Run the CUDA profiling test for a single src rank only to save
                # time, since varying src_rank does not affect correctness.
if (
src == 0
and cuda
and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS
):
self.call_dist_op(
":all_reduce",
async_op,
dist.all_reduce,
tensor,
op,
group_id,
async_op=async_op,
profile_cuda=True,
tensor_shapes=tensor_shapes,
)
self._barrier()
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_sum_async(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
async_op=True,
)
@sandcastle_skip_if(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and NCCL backends will have CUDA allReduce tested",
)
@skip_if_no_gpu
def test_all_reduce_sum_cuda(self):
torch.cuda.set_device(self.rank)
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
True,
rank_to_GPU,
)
@sandcastle_skip_if(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and NCCL backends will have CUDA allReduce tested",
)
@skip_if_no_gpu
def test_all_reduce_sum_cuda_async(self):
torch.cuda.set_device(self.rank)
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
True,
rank_to_GPU,
async_op=True,
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_sum_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
complex(2, 3),
complex(10, 11),
complex(2, 3) + (complex(10, 11) * (len(group) - 1)),
dtype=torch.cfloat,
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_complex_unsupported_ops(self):
unsupported_ops = [
dist.ReduceOp.MAX,
dist.ReduceOp.MIN,
dist.ReduceOp.PRODUCT,
dist.ReduceOp.BAND,
dist.ReduceOp.BOR,
dist.ReduceOp.BXOR,
]
group, group_id, rank = self._init_global_test()
for unsupported_op in unsupported_ops:
with self.assertRaisesRegex(
RuntimeError, "all_reduce does not support"
):
dist.all_reduce(
_build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id
)
@sandcastle_skip_if(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and NCCL backends will have CUDA allReduce tested",
)
@skip_if_no_gpu
def test_all_reduce_sum_cuda_complex(self):
torch.cuda.set_device(self.rank)
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
complex(2, 3),
complex(10, 11),
complex(2, 3) + (complex(10, 11) * (len(group) - 1)),
True,
rank_to_GPU,
dtype=torch.cfloat,
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@skip_if_small_worldsize
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_if_small_worldsize
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@skip_if_small_worldsize
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@skip_if_small_worldsize
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
# SPARSE ALL REDUCE
def _test_sparse_all_reduce_sum(self, fn):
group, group_id, rank = self._init_global_test()
tests = simple_sparse_reduce_tests(
rank, dist.get_world_size(), num_inputs=1
)
for (inputs, outputs) in tests:
tensors = [fn(input) for input in inputs]
dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id)
self.assertEqual(tensors[0], outputs[0])
@sandcastle_skip_if(
BACKEND != "gloo", "Only Gloo backend support sparse all reduce"
)
def test_sparse_all_reduce_sum(self):
self._test_sparse_all_reduce_sum(lambda t: t)
@sandcastle_skip_if(
BACKEND != "gloo", "Only Gloo backend support sparse all reduce"
)
@skip_if_no_gpu
def test_sparse_all_reduce_sum_cuda(self):
self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda())
# ALL REDUCE - COALESCED
@staticmethod
def _all_reduce_coalesced_sum_test_cases(group_size):
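            # Returns (master_values, worker_values, expected_values, dtypes),
            # one entry per coalesced tensor; the third entry exercises cfloat
            # support for SUM.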
return (
[2, 3, complex(2, 3)],
[10, 11, complex(10, 11)],
[
2 + 10 * (group_size - 1),
3 + 11 * (group_size - 1),
complex(2, 3) + complex(10, 11) * (group_size - 1),
],
[torch.float, torch.float, torch.cfloat],
)
@staticmethod
def _all_reduce_coalesced_product_test_cases(group_size):
return (
[1, 2],
[3, 4],
[1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)],
[torch.float, torch.float],
)
@staticmethod
def _all_reduce_coalesced_min_test_cases(group_size):
return (
[1, 4],
[2, 3],
[1, 3],
[torch.float, torch.float],
)
@staticmethod
def _all_reduce_coalesced_max_test_cases(group_size):
return (
[1, 4],
[2, 3],
[2, 4],
[torch.float, torch.float],
)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_coalesced_max_complex_unsupported(self):
group, group_id, rank = self._init_global_test()
with self.assertRaisesRegex(RuntimeError, "all_reduce does not support"):
dist.all_reduce_coalesced(
[_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id
)
def _test_all_reduce_coalesced_helper(
self,
group,
group_id,
rank,
op,
cuda=False,
rank_to_GPU=None,
):
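            # Picks the per-op test-case generator above, builds one tensor per
            # (dtype, value) pair, runs all_reduce_coalesced on the whole list,
            # and checks every tensor against its expected value.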
test_case_func = {
dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases,
dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases,
dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases,
dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases,
}[op]
master_values, worker_values, expected_values, dtypes = test_case_func(
len(group)
)
for src in group:
curr_values = master_values if rank == src else worker_values
tensors = [
_build_tensor(src + 1, val, dtype=dtype)
for dtype, val in zip(dtypes, curr_values)
]
if cuda:
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
tensor_shapes = []
for tensor in tensors:
if tensor.dtype == torch.complex64:
tensor_shapes.append(torch.view_as_real(tensor).shape)
else:
tensor_shapes.append(tensor.shape)
self.call_dist_op(
":all_reduce",
False,
dist.all_reduce_coalesced,
tensors,
op,
group_id,
tensor_shapes=tensor_shapes,
)
expected_tensors = [
_build_tensor(src + 1, expected_value, dtype=dtype)
for dtype, expected_value in zip(dtypes, expected_values)
]
self.assertEqual(tensors, expected_tensors)
self._barrier()
@require_backend({"gloo"})
def test_all_reduce_coalesced_sum(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
cuda=False,
rank_to_GPU=None,
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_product(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
cuda=False,
rank_to_GPU=None,
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_min(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MIN,
cuda=False,
rank_to_GPU=None,
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_max(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend({"gloo"})
def test_all_reduce_coalesced_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend({"gloo"})
def test_all_reduce_coalesced_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
cuda=False,
rank_to_GPU=None,
)
@skip_if_small_worldsize
@require_backend({"gloo"})
def test_all_reduce_coalesced_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend({"gloo"})
def test_all_reduce_coalesced_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
cuda=False,
rank_to_GPU=None,
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MIN,
cuda=False,
rank_to_GPU=None,
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None
)
# SCATTER
def _test_scatter_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float
):
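            # For each dest rank, dest supplies a scatter_list with one tensor
            # per rank (tensor i filled with the value i); every rank then
            # checks that it received the tensor holding its own rank.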
for dest in group:
tensor = _build_tensor(dest + 1, -1, dtype=dtype)
expected_tensor = _build_tensor(dest + 1, rank, dtype=dtype)
tensors = (
[_build_tensor(dest + 1, i, dtype=dtype) for i in group]
if rank == dest
else []
)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
if dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(t).shape for t in tensors]
else:
tensor_shapes = [t.shape for t in tensors]
self.call_dist_op(
":scatter",
False,
dist.scatter,
tensor,
src=dest,
scatter_list=tensors,
group=group_id,
expect_event=False,
tensor_shapes=tensor_shapes,
)
self.assertEqual(tensor, expected_tensor)
self._barrier()
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_scatter_checks(self):
group, group_id, rank = self._init_global_test()
one = torch.ones([1])
# Specify scatter_list argument only on source rank.
output = one.clone() * -1
if rank == 0:
scatter_list = [one.clone() * i for i in group]
dist.scatter(output, src=0, scatter_list=scatter_list)
else:
dist.scatter(output, src=0)
self.assertEqual(output, one * rank)
# Don't specify src argument.
output = one.clone() * -1
if rank == 0:
scatter_list = [one.clone() * i for i in group]
dist.scatter(output, scatter_list=scatter_list)
else:
dist.scatter(output)
self.assertEqual(output, one * rank)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_scatter(self):
group, group_id, rank = self._init_global_test()
self._test_scatter_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA gather")
@skip_if_no_gpu
def test_scatter_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_scatter_helper(group, group_id, rank, True, rank_to_GPU)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_scatter_complex(self):
group, group_id, rank = self._init_global_test()
self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA gather")
@skip_if_no_gpu
def test_scatter_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_scatter_helper(group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_scatter_group(self):
group, group_id, rank = self._init_group_test()
self._test_scatter_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_scatter_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_scatter_helper(group, group_id, rank)
# GATHER
def _test_gather_helper(self, group, group_id, rank, cuda=False, rank_to_GPU=None):
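            # Mirror image of the scatter helper: every rank sends a tensor
            # filled with its own rank to dest, which gathers them and checks
            # that entry i equals rank i.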
for dest in group:
tensor = _build_tensor(dest + 1, rank)
tensors = (
[_build_tensor(dest + 1, -1) for i in group] if rank == dest else []
)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
self.call_dist_op(
":gather",
False,
dist.gather,
tensor,
dst=dest,
gather_list=tensors,
group=group_id,
expect_event=False,
tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None,
)
if rank == dest:
expected_tensors = [_build_tensor(dest + 1, i) for i in group]
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_gather_checks(self):
group, group_id, rank = self._init_global_test()
one = torch.ones([1])
# Specify gather_list argument only on destination rank.
if rank == 0:
gather_list = [one.clone() for _ in group]
dist.gather(one * rank, dst=0, gather_list=gather_list)
for i in group:
self.assertEqual(gather_list[i], one * i)
else:
dist.gather(one * rank, dst=0)
# Don't specify dst argument.
if rank == 0:
gather_list = [one.clone() for _ in group]
dist.gather(one * rank, gather_list=gather_list)
for i in group:
self.assertEqual(gather_list[i], one * i)
else:
dist.gather(one * rank)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_gather(self):
group, group_id, rank = self._init_global_test()
self._test_gather_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA gather")
@skip_if_no_gpu
def test_gather_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_gather_helper(group, group_id, rank, True, rank_to_GPU)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_gather_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_gather_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_gather_helper(group, group_id, rank)
# ALL GATHER
def _test_all_gather_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float
):
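            # Every rank contributes a tensor filled with its own rank; after
            # all_gather each rank holds the full list and verifies that entry
            # i equals i.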
for dest in group:
tensor = _build_tensor(dest + 1, rank, dtype=dtype)
tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in group]
allgather = dist.all_gather
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
if tensors[0].dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(tensors[0]).shape]
else:
tensor_shapes = [tensors[0].shape]
self.call_dist_op(
":all_gather",
False,
allgather,
tensors,
tensor,
group_id,
False,
tensor_shapes=tensor_shapes,
)
expected_tensors = [
_build_tensor(dest + 1, i, dtype=dtype) for i in group
]
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all gather")
@skip_if_no_gpu
def test_all_gather_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all gather")
@skip_if_no_gpu
def test_all_gather_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_gather_helper(
group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat
)
@skip_if_small_worldsize
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_gather_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_gather_helper(group, group_id, rank)
def _run_all_gather_coalesced_and_verify(
self, output_tensor_lists, input_tensors, expected_tensors, group_id
):
"""
            Helper that runs all_gather_coalesced and returns True if the output
            matches expectations.
"""
tensor_shapes = []
for input_tensor in input_tensors:
if input_tensor.dtype == torch.complex64:
tensor_shapes.append(torch.view_as_real(input_tensor).shape)
else:
tensor_shapes.append(input_tensor.shape)
self.call_dist_op(
":all_gather",
False,
dist.all_gather_coalesced,
output_tensor_lists,
input_tensors,
group_id,
tensor_shapes=tensor_shapes,
)
for l1, l2 in zip(output_tensor_lists, expected_tensors):
for t1, t2 in zip(l1, l2):
if not torch.equal(t1, t2):
return False
return True
def _test_all_gather_coalesced_helper(
self, group, group_id, rank, dtype=torch.float
):
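            # Batches tensors of deliberately mismatched shapes ([1], [2x2],
            # [3x3x3], ...) into a single all_gather_coalesced call and checks
            # the gathered output lists for every rank.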
            # TODO: we should probably go through the _rank_not_in_group
            # mechanism instead to disable sending tensors.
if group_id is not None:
for test_case_id in range(2, 5):
# Make sure we create tensors of incompatible sizes, e.g.
# [1], [2x2], [3x3x3] ... to be sent in one batch
input_tensors = [
_build_multidim_tensor(
tensor_id, tensor_id, rank + tensor_id, dtype=dtype
)
for tensor_id in range(1, test_case_id)
]
output_tensor_lists = [
[
_build_multidim_tensor(
tensor_id, tensor_id, -1, dtype=dtype
)
for tensor_id in range(1, test_case_id)
]
for _ in group
]
expected_tensors = [
[
_build_multidim_tensor(
tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype
)
for tensor_id in range(1, test_case_id)
]
for rank_iter in group
]
assert self._run_all_gather_coalesced_and_verify(
output_tensor_lists, input_tensors, expected_tensors, group_id
), "output tensors do not match expected ouputs"
self._barrier()
@sandcastle_skip_if(
BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
f"{BACKEND} does not support all_gather_coalesced"
)
def test_all_gather_coalesced_simple(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_coalesced_helper(group, group_id, rank)
@sandcastle_skip_if(
BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
f"{BACKEND} does not support all_gather_coalesced"
)
def test_all_gather_coalesced_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_coalesced_helper(
group, group_id, rank, dtype=torch.cfloat
)
@skip_if_small_worldsize
@sandcastle_skip_if(
BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
f"{BACKEND} does not support all_gather_coalesced"
)
def test_all_gather_coalesced_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_gather_coalesced_helper(group, group_id, rank)
@sandcastle_skip_if(
BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
f"{BACKEND} does not support all_gather_coalesced"
)
def test_all_gather_coalesced_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_gather_coalesced_helper(group, group_id, rank)
@sandcastle_skip_if(
BACKEND in DistTestCases.skip_collective["allgather_coalesced"],
f"{BACKEND} does not support all_gather_coalesced"
)
def test_all_gather_coalesced_with_empty(self):
group, group_id, rank = self._init_global_test()
input_tensors = [
rank * torch.ones([2, 2]),
torch.ones([0]),
(rank + 1) * torch.ones([3, 3]),
torch.ones([0]),
torch.ones([0]),
]
output_tensors_lists = [
[
-1 * torch.ones([2, 2]),
-1 * torch.ones([0]),
-1 * torch.ones([3, 3]),
-1 * torch.ones([0]),
-1 * torch.ones([0]),
]
for _ in group
]
expected_tensors = [
[
r * torch.ones([2, 2]),
torch.ones([0]),
(r + 1) * torch.ones([3, 3]),
torch.ones([0]),
torch.ones([0]),
]
for r in group
]
assert self._run_all_gather_coalesced_and_verify(
output_tensors_lists, input_tensors, expected_tensors, group_id
)
self._barrier()
# AllToAll
def _test_all_to_all_single_equal_split_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float
):
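            # Each rank sends an equal slice of `rank`-valued rows to every
            # peer, so after all_to_all_single row block i of the output holds
            # the value i.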
if group_id is not None:
size = len(group)
in_tensor = torch.ones([size, size], dtype=dtype) * rank
expected_tensor = torch.cat(
[torch.ones([1, size], dtype=dtype) * i for i in group]
)
out_tensor = torch.ones([size, size], dtype=dtype) * -1
if cuda:
in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])
if dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(in_tensor).shape]
else:
tensor_shapes = [in_tensor.shape]
self.call_dist_op(
":all_to_all",
False,
dist.all_to_all_single,
out_tensor,
in_tensor,
group=group_id,
tensor_shapes=tensor_shapes,
)
self.assertEqual(out_tensor, expected_tensor)
self._barrier()
def _test_all_to_all_single_unequal_split_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float
):
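            # Rank r sends (i + 1) rows to peer i and receives (r + 1) rows
            # from each peer, so the output has shape
            # [(r + 1) * world_size, world_size] with block i filled with i.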
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
out_splits = [rank + 1 for _ in group]
in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank
out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype)
expected_tensor = torch.cat(
[torch.ones([rank + 1, size], dtype=dtype) * i for i in group]
)
if cuda:
in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])
dist.all_to_all_single(
out_tensor, in_tensor, out_splits, in_splits, group=group_id
)
self.assertEqual(out_tensor, expected_tensor)
self._barrier()
def _test_all_to_all_helper(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
dtype=torch.float,
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
in_tensors = [
torch.ones([in_splits[i], size], dtype=dtype) * rank
for i, _ in enumerate(group)
]
out_tensors = [
torch.ones([(rank + 1), size], dtype=dtype) for _ in group
]
expected_tensors = [
torch.ones([rank + 1, size], dtype=dtype) * i for i in group
]
if cuda:
in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors]
expected_tensors = [
t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors
]
out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors]
dist.all_to_all(out_tensors, in_tensors, group=group_id)
for t1, t2 in zip(out_tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports CPU all_to_all_single")
def test_all_to_all_single_equal_split(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_equal_split_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single")
@skip_if_no_gpu
def test_all_to_all_single_equal_split_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_equal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports CPU all_to_all_single")
def test_all_to_all_single_equal_split_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_equal_split_helper(
group, group_id, rank, dtype=torch.cfloat
)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single")
@skip_if_no_gpu
def test_all_to_all_single_equal_split_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_equal_split_helper(
group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat
)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports CPU all_to_all_single")
def test_all_to_all_single_unequal_split(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single")
@skip_if_no_gpu
def test_all_to_all_single_unequal_split_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports CPU all_to_all_single")
def test_all_to_all_single_unequal_split_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_unequal_split_helper(
group, group_id, rank, dtype=torch.cfloat
)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single")
@skip_if_no_gpu
def test_all_to_all_single_unequal_split_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
dtype=torch.cfloat,
)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports all_to_all")
def test_all_to_all(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only NCCL supports CUDA all_to_all")
@skip_if_rocm
def test_all_to_all_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports all_to_all")
def test_all_to_all_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat)
@sandcastle_skip_if(BACKEND != "nccl", "Only NCCL supports CUDA all_to_all")
@skip_if_rocm
def test_all_to_all_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_helper(
group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat
)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports CPU all_to_all_single")
@skip_if_small_worldsize
def test_all_to_all_single_equal_split_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_to_all_single_equal_split_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single")
@skip_if_no_gpu
@skip_if_small_worldsize
def test_all_to_all_single_equal_split_group_cuda(self):
group, group_id, rank = self._init_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_equal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports CPU all_to_all_single")
@skip_if_small_worldsize
def test_all_to_all_single_unequal_split_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single")
@skip_if_no_gpu
@skip_if_small_worldsize
def test_all_to_all_single_unequal_split_group_cuda(self):
            group, group_id, rank = self._init_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports all_to_all")
@skip_if_small_worldsize
def test_all_to_all_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_to_all_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single")
@skip_if_small_worldsize
@skip_if_rocm
def test_all_to_all_group_cuda(self):
group, group_id, rank = self._init_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports CPU all_to_all_single")
def test_all_to_all_single_equal_split_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_to_all_single_equal_split_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single")
@skip_if_no_gpu
def test_all_to_all_single_equal_split_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_equal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports CPU all_to_all_single")
def test_all_to_all_single_unequal_split_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single")
@skip_if_no_gpu
def test_all_to_all_single_unequal_split_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@sandcastle_skip_if(BACKEND != "mpi", "Only MPI supports all_to_all")
def test_all_to_all_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_to_all_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND != "nccl", "Only NCCL supports CUDA all_to_all")
@skip_if_rocm
def test_all_to_all_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)
# BARRIER
def _test_barrier_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None
):
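            # One rank broadcasts a target timestamp (now + WAIT_TIME), sleeps
            # past it, then enters the barrier; the other ranks reach the
            # barrier immediately. After the barrier every rank's clock must
            # have passed the broadcast timestamp, proving the barrier actually
            # blocked until the sleeping rank arrived.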
WAIT_TIME = 0.3 # seconds
for dest in group:
expected_time = torch.DoubleTensor(1).fill_(0.0)
if cuda:
expected_time = expected_time.cuda(rank_to_GPU[rank][0])
if dest == rank:
expected_time.fill_(time.time() + WAIT_TIME)
dist.broadcast(expected_time, dest, group_id)
time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer
dist.barrier(group_id)
else:
dist.broadcast(expected_time, dest, group_id)
dist.barrier(group_id)
self.assertGreaterAlmostEqual(
float(time.time()),
float(expected_time[0]),
"destination rank: %d, my rank: %d" % (dest, rank)
+ " (if you see this failure, please report in #14554)",
)
# Use higher timeout for the instance where the test runs
# against a subgroup and uses a CUDA tensor for expected time.
# The CUDA initialization for the participating processes can
# take long enough for the barrier timeout to trigger on the
# process that doesn't participate in the group.
self._barrier(timeout=20)
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND == "mpi", "MPI doesn't supports GPU barrier")
def test_barrier_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND == "mpi", "MPI doesn't supports GPU barrier")
def test_barrier_group_cuda(self):
group, group_id, rank = self._init_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@skip_if_no_gpu
@sandcastle_skip_if(BACKEND == "mpi", "MPI doesn't supports GPU barrier")
def test_barrier_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["cpu barrier"], f"{BACKEND} does not support CPU barrier")
def test_barrier(self):
group, group_id, rank = self._init_global_test()
self._test_barrier_helper(group, group_id, rank)
@skip_if_small_worldsize
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["cpu barrier"], f"{BACKEND} does not support CPU barrier")
def test_barrier_group(self):
group, group_id, rank = self._init_group_test()
self._test_barrier_helper(group, group_id, rank)
@sandcastle_skip_if(BACKEND in DistTestCases.skip_collective["cpu barrier"], f"{BACKEND} does not support CPU barrier")
def test_barrier_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_barrier_helper(group, group_id, rank)
def _test_broadcast_multigpu_helper(self, group, group_id, rank, rank_to_GPU):
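            # Broadcasts from src to one tensor per local GPU on every rank;
            # the src rank seeds its first local GPU tensor, and all copies on
            # all GPUs should end up equal to it.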
for src in group:
expected_tensor = _build_tensor(src + 1)
tensors = [
_build_tensor(src + 1, -1).cuda(device=i) for i in rank_to_GPU[rank]
]
if rank == src:
tensors[0] = expected_tensor.cuda(device=rank_to_GPU[rank][0])
dist.broadcast_multigpu(tensors, src, group_id)
for tensor in tensors:
self.assertEqual(tensor, expected_tensor)
self._barrier()
@sandcastle_skip_if(BACKEND == "mpi", "MPI doesn't support broadcast multigpu")
@sandcastle_skip_if(BACKEND == "nccl", "NCCL broadcast multigpu skipped")
@skip_if_no_gpu
def test_broadcast_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_broadcast_multigpu_helper(group, group_id, rank, rank_to_GPU)
def _test_all_reduce_multigpu_helper(
self,
group,
group_id,
rank,
rank_to_GPU,
op,
master_value,
worker_value,
expected_value,
dtype=torch.float,
):
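            # Each rank contributes one tensor per local GPU, all filled with
            # the same value, so the expected per-element result is the
            # single-GPU result scaled by the number of GPUs per rank, e.g.
            # (2 + 10 * (world_size - 1)) * len(rank_to_GPU[0]) for SUM.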
for src in group:
curr_value = master_value if rank == src else worker_value
tensors = [
_build_tensor(src + 1, curr_value, dtype=dtype).cuda(device=i)
for i in rank_to_GPU[rank]
]
self.call_dist_op(
":all_reduce",
False,
dist.all_reduce_multigpu,
tensors,
op,
group_id,
)
expected_tensor = _build_tensor(src + 1, expected_value, dtype=dtype)
for tensor in tensors:
self.assertEqual(tensor, expected_tensor)
self._barrier()
@sandcastle_skip_if(BACKEND == "mpi", "MPI doesn't support broadcast multigpu")
@sandcastle_skip_if(BACKEND == "nccl", "CUDA all_reduce multigpu skipped for NCCL")
@skip_if_no_gpu
def test_all_reduce_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.ReduceOp.SUM,
2,
10,
(2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),
)
@sandcastle_skip_if(BACKEND == "mpi", "MPI doesn't support broadcast multigpu")
@sandcastle_skip_if(BACKEND == "nccl", "CUDA all_reduce multigpu skipped for NCCL")
@skip_if_no_gpu
def test_all_reduce_multigpu_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
self._test_all_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.ReduceOp.SUM,
complex(2, 3),
complex(10, 11),
(complex(2, 3) + complex(10, 11) * (len(group) - 1))
* len(rank_to_GPU[0]),
dtype=torch.cfloat,
)
def _test_reduce_multigpu_helper(
self,
group,
group_id,
rank,
rank_to_GPU,
op,
master_value,
worker_value,
expected_value,
):
for src in group:
tensor_value = master_value if rank == src else worker_value
tensors = [
_build_tensor(src + 1, tensor_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
self.call_dist_op(
":reduce",
False,
dist.reduce_multigpu,
tensors,
src,
op,
group_id,
expect_event=len(tensors) == 1,
tensor_shapes=[tensors[0].shape],
)
if rank == src:
expected_tensor = _build_tensor(src + 1, expected_value)
self.assertEqual(tensors[0], expected_tensor)
self._barrier()
@sandcastle_skip_if(
BACKEND != "nccl", "Only Nccl backend supports reduce multigpu"
)
@skip_if_no_gpu
def test_reduce_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
self._test_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.ReduceOp.SUM,
2,
10,
(2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),
)
def _test_all_gather_multigpu_helper(
self, group, group_id, rank, rank_to_GPU, dtype=torch.float
):
for dest in group:
tensors = [
_build_tensor(dest + 1, dtype=dtype).cuda(device=i)
for i in rank_to_GPU[rank]
]
                # construct the expected output along with
                # a placeholder to receive the all_gather results
output_tensors = []
expected_output = []
output_per_gpu = (
[_build_tensor(dest + 1, -1, dtype=dtype)]
* len(rank_to_GPU[0])
* len(group)
)
expected_per_gpu = (
[_build_tensor(dest + 1, dtype=dtype)]
* len(rank_to_GPU[0])
* len(group)
)
for gpu in rank_to_GPU[rank]:
output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])
expected_output.append(
[t.cuda(device=gpu) for t in expected_per_gpu]
)
self.call_dist_op(
":all_gather",
False,
dist.all_gather_multigpu,
output_tensors,
tensors,
group_id,
expect_event=len(expected_output) == 1,
)
self.assertEqual(output_tensors, expected_output)
self._barrier()
@sandcastle_skip_if(
BACKEND != "nccl", "Only Nccl backend supports allgather multigpu"
)
@skip_if_no_gpu
def test_all_gather_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
self._test_all_gather_multigpu_helper(group, group_id, rank, rank_to_GPU)
@sandcastle_skip_if(
BACKEND != "nccl", "Only Nccl backend supports allgather multigpu"
)
@skip_if_no_gpu
def test_all_gather_multigpu_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
torch.cuda.set_device(device_id)
self._test_all_gather_multigpu_helper(
group, group_id, rank, rank_to_GPU, dtype=torch.cfloat
)
def _model_step(self, model):
for param in model.parameters():
if param.grad is not None:
with torch.no_grad():
param += param.grad
param.grad = None
def _model_step_with_zero_grad(self, model):
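            # Like _model_step, but zeroes the gradient in place instead of
            # setting it to None.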
for param in model.parameters():
if param.grad is not None:
with torch.no_grad():
param += param.grad
param.grad.requires_grad_(False)
param.grad.zero_()
def _prepare_dummy_data(self, local_bs):
# global_bs for DDP should be divisible by WORLD_SIZE
world_size = int(os.environ["WORLD_SIZE"])
global_bs = world_size * local_bs
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 4)
loss = nn.MSELoss()
return global_bs, input_cpu, target, loss
# END TO END TEST FOR DISTRIBUTEDDATAPARALLEL
def _test_DDP_helper(
self, model, input_var, target, loss, scale_factor=1.0, memory_format=None
):
model.train()
output = model(input_var)
l = loss(output, target) * scale_factor
l.backward()
if memory_format is not None:
self.assertTrue(output.is_contiguous(memory_format=memory_format))
def _assert_equal_param(self, param_gpu, param_DDP):
self.assertEqual(len(param_gpu), len(param_DDP))
for p_gpu, p_DDP in zip(param_gpu, param_DDP):
self.assertEqual(p_gpu, p_DDP)
def _test_DDP_niter(
self,
model_base,
model_DDP,
input,
target,
loss,
local_bs,
rank,
batch_size,
test_save,
offset=None,
world_size=0,
zero_grad=False,
memory_format=None,
n_iter=5,
):
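            # Runs n_iter training steps: the baseline model trains on the full
            # global batch while the DDP model trains on this rank's shard,
            # with the loss scaled by world_size * local_bs / batch_size so the
            # allreduce-averaged DDP gradients match the full-batch baseline.
            # Parameters are compared after every manual weight update, the
            # input is reshuffled each iteration, and (for file:// init) the
            # DDP model is saved and reloaded at idx == 2 to cover
            # serialization.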
for idx in range(n_iter):
# single cpu/gpu training
self._test_DDP_helper(
model_base, input, target, loss, memory_format=memory_format
)
if offset is None:
offset = rank * local_bs
# DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
self._test_DDP_helper(
model_DDP,
input[offset : offset + local_bs],
target[offset : offset + local_bs],
loss,
world_size * local_bs / batch_size if world_size != 0 else 1,
memory_format=memory_format,
)
# Update weights and run a second iteration to shake out errors
if zero_grad:
self._model_step_with_zero_grad(model_base)
self._model_step_with_zero_grad(model_DDP)
else:
self._model_step(model_base)
self._model_step(model_DDP)
self._assert_equal_param(
list(model_base.parameters()), list(model_DDP.module.parameters())
)
# Shuffle the input so that DDP input is different
input = input[torch.randperm(batch_size)]
# save the model in the middle and reload
if test_save and idx == 2 and INIT_METHOD.startswith("file://"):
with tempfile.NamedTemporaryFile() as tmp:
if sys.platform == "win32":
torch.save(model_DDP, tmp)
tmp.seek(0)
model_DDP = torch.load(tmp)
else:
torch.save(model_DDP, tmp.name)
model_DDP = torch.load(tmp.name)
with tempfile.TemporaryFile() as tmp_file:
torch.save(model_DDP, tmp_file)
tmp_file.seek(0)
saved_model = torch.load(tmp_file)
for k in model_DDP.state_dict():
self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k])
def _test_DistributedDataParallel(
self,
gpu_subset,
rank,
output_device=None,
gradient_as_bucket_view=False,
static_graph=False,
set_static_graph_twice=False,
):
# Run a simple end to end DDP model, use result of single node model
# as baseline
# cpu training setup
model = DDP_NET
# single gpu training setup
model_gpu = copy.deepcopy(model)
model_gpu.cuda(gpu_subset[0])
# DDP training setup
model_DDP = copy.deepcopy(model)
model_DDP.cuda(gpu_subset[0])
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP,
device_ids=gpu_subset,
gradient_as_bucket_view=gradient_as_bucket_view,
static_graph=static_graph,
)
if set_static_graph_twice:
model_DDP._set_static_graph()
# test serializable/unserializable
with tempfile.NamedTemporaryFile() as tmp:
if sys.platform == "win32":
torch.save(model_DDP, tmp)
tmp.seek(0)
model_DDP = torch.load(tmp)
else:
torch.save(model_DDP, tmp.name)
model_DDP = torch.load(tmp.name)
# dummy data initialization
local_bs = len(gpu_subset)
global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
# check two model parameters over 5 iterations
self._test_DDP_niter(
model_gpu,
model_DDP,
input_cpu.cuda(gpu_subset[0]),
target.cuda(gpu_subset[0]),
loss,
local_bs,
rank,
global_bs,
True,
)
self._barrier()
def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False):
# Run a simple end to end DDP-CPU model, use result of single node
# model as baseline
group, group_id, rank = self._init_global_test()
# cpu training setup
model_base = DDP_NET
# DDP-CPU training setup
model_DDP = copy.deepcopy(model_base)
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP, gradient_as_bucket_view=gradient_as_bucket_view
)
# dummy data initialization
local_bs = 2
global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
# check two model parameters over 5 iterations
self._test_DDP_niter(
model_base,
model_DDP,
input_cpu,
target,
loss,
local_bs,
rank,
global_bs,
False,
zero_grad=True,
)
self._barrier()
return model_DDP
@sandcastle_skip_if(BACKEND == "nccl", "nccl does not support DDP on CPU models")
def test_DistributedDataParallelCPU(self):
self._test_DistributedDataParallelCPU()
@sandcastle_skip_if(BACKEND == "nccl", "nccl does not support DDP on CPU models")
def test_DistributedDataParallelCPU_grad_is_view(self):
self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_DistributedDataParallel_requires_grad(self):
# a module without gradients shouldn't be accepted
self.assertRaises(
RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module())
)
self._barrier()
@sandcastle_skip_if(
BACKEND == "nccl",
"Gloo-only test"
)
def test_ddp_create_graph(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.tensor(1.))
def forward(self):
return self.p.pow(2)
model = Model()
ddp_model = torch.nn.parallel.DistributedDataParallel(model)
for _ in range(6):
                # Verify DDP doesn't throw when run with create_graph=True.
# Although we do warn about potential issues, please see
# https://github.com/pytorch/pytorch/issues/63929 for details.
ddp_model().backward(create_graph=True)
# grad tensors should require grad.
self.assertTrue(
all([param.requires_grad for param in ddp_model.parameters()])
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_DistributedDataParallel_non_default_stream(self):
stream = torch.cuda.Stream(self.rank)
rank = self.rank
with torch.cuda.stream(stream):
net = torch.nn.parallel.DistributedDataParallel(
torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank]
)
for i in range(1000):
# Clear gradients manually
grad = net.module.weight.grad
if grad is not None:
grad.requires_grad_(False)
grad.zero_()
# Forward + BW
batch = torch.tensor([rank]).float().cuda(rank)
loss = net(batch).sum()
loss.backward()
# For each worker, the gradient on the weight should be worker_rank.
grad = net.module.weight.grad
avg = grad.clone()
                # Each worker's gradient should already hold the all-reduce
                # average. All-reducing these copies and dividing by world_size
                # should therefore give back the same average; if not, one of
                # the workers has not correctly written back the averaged
                # gradient before this all-reduce call.
dist.all_reduce(avg)
world_size = int(os.environ["WORLD_SIZE"])
avg.div_(world_size)
expected_grad = sum(i for i in range(world_size)) / world_size
self.assertEqual(
avg[0, 0],
expected_grad,
msg=f"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}",
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["cuda"],
f"The {BACKEND} backend does not support DDP communication hook on CUDA devices"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_comm_hook_logging(self):
hooks = [
default.allreduce_hook,
default.fp16_compress_hook,
powerSGD.powerSGD_hook,
powerSGD.batched_powerSGD_hook,
quantization_hooks.quantization_pertensor_hook,
quantization_hooks.quantization_perchannel_hook,
]
cpp_builtin_hooks = [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]
for hook in hooks:
ddp_model = torch.nn.parallel.DistributedDataParallel(
torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
device_ids=[self.rank],
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
# Hook not registered yet, so should be empty
self.assertEqual(ddp_logging_data.get("comm_hook"), None)
ddp_model.register_comm_hook(None, hook)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertEqual(ddp_logging_data.get("comm_hook"), hook.__qualname__)
for hook in cpp_builtin_hooks:
ddp_model = torch.nn.parallel.DistributedDataParallel(
torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
device_ids=[self.rank],
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
# Hook not registered yet, so should be empty
self.assertEqual(ddp_logging_data.get("comm_hook"), None)
ddp_model._register_builtin_comm_hook(hook)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertEqual(ddp_logging_data.get("comm_hook"), str(hook))
# No hook registered
ddp_model = torch.nn.parallel.DistributedDataParallel(
torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
device_ids=[self.rank],
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
# Hook not registered yet, so should be empty
self.assertEqual(ddp_logging_data.get("comm_hook"), None)
# Even after two forward/backward passes, the comm_hook entry should still be an empty string
for i in range(2):
inp = torch.ones(1, 1, device=self.rank)
loss = ddp_model(inp).sum()
loss.backward()
ddp_logging_data = ddp_model._get_ddp_logging_data()
# Note: DETAIL debug mode logs DDP logging data to stdout and
# thus accesses std::map, which inserts a default-constructed value
# if the key didn't exist.
self.assertEqual(ddp_logging_data.get("comm_hook", ""), "")
def _test_ddp_hook_with_optimizer_parity(
self, grad_as_bucket_view, static_graph, optim_cls,
optimize_subset, *functional_optim_args, **functional_optim_kwargs
):
rank = self.rank
torch.cuda.set_device(rank)
torch.manual_seed(rank)
torch.cuda.manual_seed(rank)
models_to_test = [
(LargeNet(), torch.randn(1, 1000).cuda()),
]
if HAS_TORCHVISION:
models_to_test.append(
(torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda())
)
for (model, inp) in models_to_test:
# Enable determinism in cudnn operators
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
# Create DDP model that runs optimizer in fused fashion.
ddp_model_with_optimizer_hook = (
torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(model).cuda(),
device_ids=[self.rank],
gradient_as_bucket_view=grad_as_bucket_view,
static_graph=static_graph,
)
)
# Create DDP model with no hook that does optimizer after
# backward.
ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(model).cuda(),
device_ids=[self.rank],
gradient_as_bucket_view=grad_as_bucket_view,
static_graph=static_graph,
)
hook_params = ddp_model_with_optimizer_hook.parameters()
no_hook_params = ddp_model_with_no_hook.parameters()
if optimize_subset:
hook_params = list(hook_params)
no_hook_params = list(no_hook_params)
self.assertGreater(len(hook_params), 0)
hook_params = [hook_params[0]]
no_hook_params = [no_hook_params[0]]
# Register a fused optimizer that will run optimizer in step
# with allreduce.
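# A rough sketch of what the private API below does (names are taken from the
# call sites in this test; this is not a public, documented interface):
#
#     ddp_model._register_fused_optim(torch.optim.SGD, lr, optim_params=params)
#
# After registration, the optimizer update for each bucket is expected to run
# inside the communication hook, right after that bucket's allreduce finishes,
# instead of as a separate optimizer.step() after backward().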
if optimize_subset:
# API where optim_params is specified.
ddp_model_with_optimizer_hook._register_fused_optim(
optim_cls,
*functional_optim_args,
optim_params=hook_params,
**functional_optim_kwargs,
)
else:
# API where optim_params is omitted
ddp_model_with_optimizer_hook._register_fused_optim(
optim_cls,
*functional_optim_args,
**functional_optim_kwargs,
)
optimizer_no_hook = optim_cls(
no_hook_params,
*functional_optim_args,
**functional_optim_kwargs,
)
# Verify parameters are equal initially.
for hook_param, allreduce_param in zip(
ddp_model_with_optimizer_hook.parameters(),
ddp_model_with_no_hook.parameters(),
):
self.assertEqual(hook_param, allreduce_param)
# Save old parameters to later verify optimizer modified them.
opt_hook_init_params = copy.deepcopy(
list(ddp_model_with_optimizer_hook.parameters())
)
# Run optimizer with hook model.
for i in range(6):
ddp_model_with_optimizer_hook.zero_grad()
out = ddp_model_with_optimizer_hook(inp)
loss = out.sum()
loss.backward()
dist.barrier()
# Run regular model.
for i in range(6):
ddp_model_with_no_hook.zero_grad()
out = ddp_model_with_no_hook(inp)
loss = out.sum()
loss.backward()
optimizer_no_hook.step()
dist.barrier()
# Now verify parameters are equal.
for hook_param, allreduce_param in zip(
ddp_model_with_optimizer_hook.parameters(),
ddp_model_with_no_hook.parameters(),
):
self.assertEqual(hook_param, allreduce_param)
# Verify the optimizer modified the appropriate parameter set;
# otherwise the comparison above would be trivially equal.
if optimize_subset:
self.assertNotEqual(
opt_hook_init_params[0],
list(ddp_model_with_optimizer_hook.parameters())[0]
)
# Untouched params should be equal
self.assertEqual(
opt_hook_init_params[1:],
list(ddp_model_with_optimizer_hook.parameters())[1:]
)
else:
self.assertNotEqual(
opt_hook_init_params,
list(ddp_model_with_optimizer_hook.parameters()),
)
dist.barrier()
@sandcastle_skip_if(
BACKEND == "nccl",
"Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259"
)
@skip_if_lt_x_gpu(2)
@parametrize("grad_as_bucket_view", [True, False])
@parametrize("static_graph", [True, False])
@parametrize("optimize_subset", [True, False])
def test_ddp_hook_with_optimizer_parity_adamw(
self,
grad_as_bucket_view,
static_graph,
optimize_subset,
):
adamw_lr = 1e-2
adamw_betas = (0.9, 0.99)
adamw_eps = 1e-6
self._test_ddp_hook_with_optimizer_parity(
grad_as_bucket_view,
static_graph,
torch.optim.AdamW,
optimize_subset,
adamw_lr,
betas=adamw_betas,
eps=adamw_eps,
)
@sandcastle_skip_if(
BACKEND == "nccl",
"Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259"
)
@skip_if_lt_x_gpu(2)
@parametrize("optimize_subset", [True, False])
def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset):
adam_lr = 1e-2
adam_betas = (0.9, 0.99)
adam_eps = 1e-6
self._test_ddp_hook_with_optimizer_parity(
True, # grad as bucket view
False, # static graph
torch.optim.Adam,
optimize_subset,
adam_lr,
betas=adam_betas,
eps=adam_eps,
)
@sandcastle_skip_if(
BACKEND == "nccl",
"Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259"
)
@skip_if_lt_x_gpu(2)
@parametrize("optimize_subset", [True, False])
def test_ddp_hook_with_optimizer_parity_sgd(self, optimize_subset):
sgd_lr = 1e-2
sgd_momentum = 0.9
sgd_weight_decay = 0.01
# Not testing grad_as_bucket_view and static_graph as they are
# tested in AdamW test above.
self._test_ddp_hook_with_optimizer_parity(
True, # grad as bucket view
False, # static_graph
torch.optim.SGD,
optimize_subset,
sgd_lr,
momentum=sgd_momentum,
weight_decay=sgd_weight_decay,
)
def _test_ddp_hook_parity(self, state, hook, num_validated_iters=100):
rank = self.rank
m = torch.nn.Linear(1, 5)
try:
process_group = state.process_group
except AttributeError:
process_group = state
net_with_hook = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(m).to(rank),
device_ids=[rank],
process_group=process_group,
)
net_with_hook.register_comm_hook(state=state, hook=hook)
net_without_hook = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(m).to(rank),
device_ids=[rank],
process_group=process_group,
)
for i in range(100):
# Clear gradients manually.
for g in [
net_without_hook.module.weight.grad,
net_with_hook.module.weight.grad,
]:
if g is not None:
g.requires_grad_(False)
g.zero_()
# Forward + BW
batch = torch.tensor([rank]).float().cuda(rank)
loss = net_without_hook(batch).sum()
loss.backward()
# For each worker, the gradient on the weight should be worker_rank.
grad = net_without_hook.module.weight.grad
avg = grad.clone()
expected_grad = (
sum(i for i in range(dist.get_world_size())) / dist.get_world_size()
)
loss_hook = net_with_hook(batch).sum()
loss_hook.backward()
grad_hook = net_with_hook.module.weight.grad
avg_hook = grad_hook.clone()
if i < num_validated_iters:
# Verify hook grad with expected.
self.assertEqual(
avg_hook[0, 0].item(),
expected_grad,
msg=f"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}",
)
# Verify hook grad with vanilla allreduce
self.assertEqual(
avg_hook[0, 0],
avg[0, 0],
msg=f"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}",
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["cuda"],
f"The {BACKEND} backend does not support DDP communication hook on CUDA devices"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_hook_parity_allreduce(self):
self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["cuda"],
f"The {BACKEND} backend does not support DDP communication hook on CUDA devices"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_hook_parity_allreduce_process_group(self):
# process_group is passed in to both DDP and comm. hook
world_size = dist.get_world_size()
rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
gpus = [rank_to_GPU[int(r)][0] for r in range(world_size)]
process_group = torch.distributed.new_group(gpus)
self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["cuda"],
f"The {BACKEND} backend does not support DDP communication hook on CUDA devices"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_hook_parity_powerSGD(self):
for warm_start in [True, False]:
powersgd_state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=2,
warm_start=warm_start,
)
self._test_ddp_hook_parity(
state=powersgd_state, hook=powerSGD.powerSGD_hook
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["cuda"],
f"The {BACKEND} backend does not support DDP communication hook on CUDA devices"
)
@sandcastle_skip_if(
NO_MULTIPROCESSING_SPAWN,
"Disabled for environments that \
don't support multiprocessing with spawn start method",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_hook_parity_post_localSGD(self):
# Although we start running local SGD at iteration 10, we still use the global process group to run it,
# so post-LocalSGD still allreduces gradients globally for the remaining iterations.
state = post_localSGD.PostLocalSGDState(
process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10
)
self._test_ddp_hook_parity(
state=state, hook=post_localSGD.post_localSGD_hook
)
# Only validate the warmup iterations before local SGD is applied,
# because when `post_local_gradient_allreduce` is disabled, the gradients will not be synchronized at all.
# Note that in practice a model averager has to be applied to run model averaging,
# so local gradient averaging is not necessary.
start_localSGD_iter = 10
state = post_localSGD.PostLocalSGDState(
process_group=None,
subgroup=dist.group.WORLD,
start_localSGD_iter=start_localSGD_iter,
post_local_gradient_allreduce=False,
)
self._test_ddp_hook_parity(
state=state, hook=post_localSGD.post_localSGD_hook, num_validated_iters=start_localSGD_iter
)
# When `subgroup` is None, it is equivalent to the intra-node subgroup on each node.
# For this single-node test environment, the intra-node process group is equivalent to
# the global process group.
if self.world_size == dist.get_world_size():
state = post_localSGD.PostLocalSGDState(
process_group=None, subgroup=None, start_localSGD_iter=10
)
self._test_ddp_hook_parity(
state=state, hook=post_localSGD.post_localSGD_hook
)
# Since local SGD would only start after the total number of 100 iterations,
# no local SGD is actually executed, and we don't even need to provide a subgroup for this case.
state = post_localSGD.PostLocalSGDState(
process_group=None, subgroup=None, start_localSGD_iter=1000
)
self._test_ddp_hook_parity(
state=state, hook=post_localSGD.post_localSGD_hook
)
def _prepare_single_device_module(
self,
rank,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_cpu_module(
self,
process_group,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2)
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _test_accumulate_gradients_no_sync(
self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False
):
"""
This is the recommended way to implement gradient accumulation.
If ``ddp_comm_hook`` is specified, that hook is also registered
on the ``ddp_model``. The hook fed into this function should not change
the resulting gradients.
"""
group, group_id, rank = self._init_global_test()
world_size = get_world_size()
# FIXME: Add testing for gloo/CUDA
if BACKEND == "mpi" or BACKEND == "gloo":
global_batch_size = world_size
local_batch_size = 1
model, ddp_model, input, target = self._prepare_cpu_module(
group_id, global_batch_size, gradient_as_bucket_view
)
if BACKEND == "nccl":
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
int_devices = rank_to_GPU[rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
global_batch_size = world_size
local_batch_size = len(devices)
model, ddp_model, input, target = self._prepare_single_device_module(
rank,
group_id,
devices,
devices,
global_batch_size,
gradient_as_bucket_view,
)
if ddp_comm_hook is not None:
ddp_model.register_comm_hook(group_id, ddp_comm_hook)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure gradient accumulation works under no_grad() => no gradients are accumulated.
with torch.no_grad():
with ddp_model.no_sync():
ddp_model.train()
ddp_model(input)
# check two model parameters over num_iters iterations
for iteration in range(num_iters):
step_model(model, input, target)
ddp_input = input[
rank * local_batch_size : (rank + 1) * local_batch_size
]
ddp_target = target[
rank * local_batch_size : (rank + 1) * local_batch_size
]
if iteration % 2 == 0:
# accumulate grads locally
with ddp_model.no_sync():
step_model(ddp_model, ddp_input, ddp_target)
else:
# sync grads
step_model(ddp_model, ddp_input, ddp_target)
for i, j in zip(model.parameters(), ddp_model.parameters()):
if not i.requires_grad:
continue
if iteration % 2 == 0:
self.assertNotEqual(i.grad, j.grad)
else:
self.assertEqual(i.grad, j.grad)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@sandcastle_skip_if(
BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
"get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_accumulate_gradients_no_sync(self):
"""
Runs _test_accumulate_gradients_no_sync using default inputs
"""
self._test_accumulate_gradients_no_sync()
@sandcastle_skip_if(
BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
"get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_accumulate_gradients_no_sync_grad_is_view(self):
"""
Runs _test_accumulate_gradients_no_sync using default inputs
"""
self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True)
@sandcastle_skip_if(
BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
"get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_accumulate_gradients_no_sync_allreduce_hook(self):
"""
Runs multiple iterations of _test_accumulate_gradients_no_sync
using an allreduce hook and validates that the future result was properly
passed as gradients to the reducer.
"""
world_size = get_world_size()
def allreduce_hook(
group_id: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / world_size]
return (
group_id.allreduce(tensors)
.get_future()
.then(lambda fut: fut.value()[0])
)
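# The hook divides the bucket by world_size before the allreduce, so the
# summed result equals the average gradient that DDP's built-in allreduce
# would produce, keeping the hook gradient-preserving as required above.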
self._test_accumulate_gradients_no_sync(
num_iters=4, ddp_comm_hook=allreduce_hook
)
@sandcastle_skip_if(
BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
"get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self):
"""
Runs multiple iterations of _test_accumulate_gradients_no_sync using an allreduce
hook that also uses `then` callbacks. The first `then` callback multiplies the result
by 2, and the second callback divides the result by 2 * world_size. It validates
that the final result was properly passed as gradients to the reducer.
"""
world_size = get_world_size()
def allreduce_with_then_hook(
group_id: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
fut = group_id.allreduce([bucket.buffer()]).get_future()
def mult(fut):
# Multiply the result by 2.
return 2 * fut.wait()[0]
def div(fut):
# Divide the result by 2 * world_size.
return fut.wait() / (2 * world_size)
return fut.then(mult).then(div)
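# Net effect: (allreduced sum) * 2 / (2 * world_size) == sum / world_size,
# i.e. the usual gradient average, so the chained callbacks should not
# change the resulting gradients.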
self._test_accumulate_gradients_no_sync(
num_iters=4, ddp_comm_hook=allreduce_with_then_hook
)
@sandcastle_skip_if(
BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo",
"get_future is only supported on mpi, nccl and gloo",
)
@nccl_skip_if_lt_x_gpu(BACKEND, 2)
def test_get_future(self):
def mult(fut):
return [t * 3 for t in fut.wait()]
def add(fut):
return [t + 1 for t in fut.wait()]
group, group_id, rank = self._init_global_test()
input = _build_tensor(3, 2)
if BACKEND == "nccl":
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
device_id = rank_to_GPU[rank][0]
input = input.to(device_id)
fut = group_id.allreduce([input]).get_future()
res = fut.then(mult).then(add).wait()
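# Each rank contributes a tensor filled with 2, so the allreduce sums to
# 2 * len(group); mult gives 6 * len(group) and add gives 6 * len(group) + 1,
# which is what the expected tensor below encodes.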
expected = _build_tensor(3, 2 * len(group) * 3 + 1)
self.assertEqual(res[0], expected)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
def test_DistributedDataParallel(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
gpus = list(rank_to_GPU[rank])
for use_bucket_view, static_graph in itertools.product(
(False, True), (False, True)
):
self._test_DistributedDataParallel(
gpu_subset=gpus,
rank=rank,
gradient_as_bucket_view=use_bucket_view,
static_graph=static_graph,
)
# test set static graph twice
self._test_DistributedDataParallel(
gpu_subset=gpus,
rank=rank,
gradient_as_bucket_view=use_bucket_view,
static_graph=static_graph,
set_static_graph_twice=True,
)
# test output_device
self._test_DistributedDataParallel(
gpu_subset=gpus,
rank=rank,
output_device=torch.device("cuda"),
gradient_as_bucket_view=use_bucket_view,
static_graph=static_graph,
)
# test device_ids
gpus_list = [torch.device("cuda:" + str(i)) for i in gpus]
self._test_DistributedDataParallel(
gpu_subset=gpus_list,
rank=rank,
output_device=torch.device("cuda"),
gradient_as_bucket_view=use_bucket_view,
static_graph=static_graph,
)
def _test_DistributedDataParallel_with_amp(self, grad_is_view=False):
torch.manual_seed(31415)
# Creates model and optimizer in default precision
model = copy.deepcopy(DDP_NET).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.03)
# Creates a GradScaler once at the beginning of training.
scaler = GradScaler()
ddp_model = nn.parallel.DistributedDataParallel(
model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view
)
input = torch.randn(dist.get_world_size() * 2, 2).cuda()
target = torch.randn(dist.get_world_size() * 2, 4).cuda()
loss_fn = nn.MSELoss()
# verify grads are none before training
for p in ddp_model.parameters():
self.assertTrue(p is not None)
self.assertTrue(p.grad is None)
for idx in range(20):
optimizer.zero_grad()
# Runs the forward pass with autocasting.
with autocast():
output = ddp_model(input)
loss = loss_fn(output, target)
# Scales loss. Calls backward() on scaled loss to create scaled gradients.
# Backward passes under autocast are not recommended.
# Backward ops run in the same dtype autocast chose for corresponding forward ops.
scaler.scale(loss).backward()
# verify grads are not none and are valid during training
for p in ddp_model.parameters():
if p.requires_grad:
self.assertTrue(p.grad is not None)
self.assertFalse(p.grad.isnan().any())
self.assertFalse(p.grad.isinf().any())
# scaler.step() first unscales the gradients of the optimizer's assigned params.
# If these gradients do not contain infs or NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + idx)
input = input[torch.randperm(dist.get_world_size() * 2)]
return ddp_model
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
def test_DistributedDataParallel_with_amp_and_grad_is_view(self):
torch.cuda.set_device(self.rank)
ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp(
grad_is_view=False
)
ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp(
grad_is_view=True
)
for i, j in zip(
ddp_model_grad_not_view.parameters(),
ddp_model_grad_is_view.parameters(),
):
self.assertEqual(i, j)
def _test_DistributedDataParallel_SyncBatchNorm(
self,
gpu_subset,
rank,
local_bs,
global_bs,
offset,
output_device=None,
affine=True,
):
# Run a simple end-to-end DDP model and use the result of the
# single-node model as the baseline
# cpu training setup
model = BN_NET if affine else BN_NET_NO_AFFINE
# single gpu training setup
model_gpu = copy.deepcopy(model)
model_gpu.cuda(gpu_subset[0])
# DDP training setup
model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))
model_DDP.cuda(gpu_subset[0])
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP, device_ids=gpu_subset
)
# test that the DDP-wrapped model can be serialized and deserialized
with tempfile.NamedTemporaryFile() as tmp:
if sys.platform == "win32":
torch.save(model_DDP, tmp)
tmp.seek(0)
model_DDP = torch.load(tmp)
else:
torch.save(model_DDP, tmp.name)
model_DDP = torch.load(tmp.name)
# data initialization
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 4)
loss = nn.MSELoss()
# check two model parameters over 5 iterations
self._test_DDP_niter(
model_gpu,
model_DDP,
input_cpu.cuda(gpu_subset[0]),
target.cuda(gpu_subset[0]),
loss,
local_bs,
rank,
global_bs,
True,
offset,
dist.get_world_size(),
5 if affine else 2,
)
self._barrier()
def _test_post_localSGD_optimizer_parity(self, create_averager, grad_is_view):
learning_rate = 0.03
net = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(DDP_NET).cuda(),
device_ids=[self.rank],
gradient_as_bucket_view=grad_is_view,
)
averager = create_averager()
opt = torch.optim.SGD(net.parameters(), lr=learning_rate)
net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(DDP_NET).cuda(),
device_ids=[self.rank],
gradient_as_bucket_view=grad_is_view,
)
# The process group cannot be pickled in some environments,
# so we cannot deep-copy an averager. See:
# https://github.com/pytorch/pytorch/pull/74737#pullrequestreview-922487496
averager2 = create_averager()
post_localSGD_opt = self._create_post_localSGD_optimizer(
net_using_post_localSGD_opt,
learning_rate,
averager2
)
input = torch.randn(dist.get_world_size() * 2, 2).cuda()
target = torch.randn(dist.get_world_size() * 2, 4).cuda()
loss_fn = nn.MSELoss()
for _ in range(20):
self._perform_a_train_step(opt, net, loss_fn, input, target)
averager.average_parameters(net.parameters())
self._perform_a_train_step(
post_localSGD_opt,
net_using_post_localSGD_opt,
loss_fn,
input,
target
)
for p1, p2 in zip(net.parameters(), net_using_post_localSGD_opt.parameters()):
self.assertEqual(p1.data, p2.data)
# Also check if the built-in step counters are the same to prevent a bug like #74737.
self.assertEqual(averager.step, averager2.step)
def _create_periodic_model_averager(self):
return averagers.PeriodicModelAverager(period=4, warmup_steps=10)
def _create_post_localSGD_optimizer(self, net, learning_rate, averager):
return post_localSGD_optimizer.PostLocalSGDOptimizer(
optim=torch.optim.SGD(net.parameters(), lr=learning_rate),
averager=averager,
)
def _perform_a_train_step(self, optimizer, net, loss_fn, input, target):
optimizer.zero_grad()
output = net(input)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
def _test_post_localSGD_optimizer_step_reload(self, create_averager):
learning_rate = 0.03
chkpt_file = tempfile.gettempdir() + "/checkpoint.pt"
net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(DDP_NET).cuda(),
device_ids=[self.rank]
)
averager = create_averager()
post_localSGD_opt = self._create_post_localSGD_optimizer(
net_using_post_localSGD_opt,
learning_rate,
averager
)
averager2 = create_averager()
dummy_post_localSGD_opt = self._create_post_localSGD_optimizer(
net_using_post_localSGD_opt,
learning_rate,
averager2
)
input = torch.randn(dist.get_world_size() * 2, 2).cuda()
target = torch.randn(dist.get_world_size() * 2, 4).cuda()
loss_fn = nn.MSELoss()
for _ in range(20):
self._perform_a_train_step(
post_localSGD_opt,
net_using_post_localSGD_opt,
loss_fn,
input,
target
)
if self.rank == 0:
torch.save({'optimizer_state_dict': post_localSGD_opt.state_dict()}, chkpt_file)
dist.barrier()
map_location = {'cuda:%d' % 0: 'cuda:%d' % self.rank}
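# The checkpoint was saved by rank 0, so its tensors reference cuda:0;
# map_location remaps them onto this rank's own GPU when loading.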
checkpoint = torch.load(chkpt_file, map_location=map_location)
dummy_post_localSGD_opt.load_state_dict(checkpoint['optimizer_state_dict'])
# Check that we didn't hit the trivial case where the step counter is still 0
self.assertNotEqual(averager2.step, 0)
# Check that the dummy averager's step counter was restored to the correct value
self.assertEqual(averager.step, averager2.step)
# Remove the 'step' entry from the checkpoint
# and make sure it is no longer in the state dictionary.
del checkpoint['optimizer_state_dict']['step']
self.assertNotIn('step', checkpoint['optimizer_state_dict'])
# Check that loading a checkpoint without a 'step' entry triggers a warning
with self.assertWarnsRegex(
expected_warning=UserWarning,
expected_regex="Loaded state dict does not contain a step counter for an averager. "
"Setting step counter to 0."
):
dummy_post_localSGD_opt.load_state_dict(checkpoint['optimizer_state_dict'])
self.assertEqual(averager2.step, 0)
if self.rank == 0:
os.remove(chkpt_file)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_post_localSGD_optimizer_parity(self):
torch.cuda.set_device(self.rank)
self._test_post_localSGD_optimizer_parity(
self._create_periodic_model_averager,
grad_is_view=False,
)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_post_localSGD_optimizer_parity_grad_is_view(self):
torch.cuda.set_device(self.rank)
self._test_post_localSGD_optimizer_parity(
self._create_periodic_model_averager,
grad_is_view=True,
)
def _create_hierarchical_model_averager(self):
period_group_size_dict = OrderedDict([(2, 2), (4, dist.get_world_size())])
return hierarchicalSGD.HierarchicalModelAverager(
period_group_size_dict=period_group_size_dict, warmup_steps=4
)
@skip_if_lt_x_gpu(4)
@skip_if_odd_worldsize
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_post_localSGD_optimizer_parity_with_hierarchical_sgd(self):
torch.cuda.set_device(self.rank)
self._test_post_localSGD_optimizer_parity(
self._create_hierarchical_model_averager,
grad_is_view=False,
)
@skip_if_lt_x_gpu(4)
@skip_if_odd_worldsize
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_post_localSGD_optimizer_parity_with_hierarchical_sgd_grad_is_view(self):
torch.cuda.set_device(self.rank)
self._test_post_localSGD_optimizer_parity(
self._create_hierarchical_model_averager,
grad_is_view=True,
)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_post_localSGD_optimizer_step_reload(self):
torch.cuda.set_device(self.rank)
self._test_post_localSGD_optimizer_step_reload(
self._create_periodic_model_averager
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self):
group, group_id, rank = self._init_global_test()
num_processes = dist.get_world_size()
local_bs = 2
bs_offset = int(rank * 2)
global_bs = int(num_processes * 2)
model = ONLY_SBN_NET
model_gpu = copy.deepcopy(model).cuda(rank)
model_DDP = nn.parallel.DistributedDataParallel(
model_gpu, device_ids=[rank]
)
memory_format = torch.channels_last
input_gpu = (
torch.randn(global_bs, 2, 4, 4, dtype=torch.float)
.cuda(rank)
.to(memory_format=memory_format)
)
target_gpu = (
torch.randn(global_bs, 2, 4, 4, dtype=torch.float)
.cuda(rank)
.to(memory_format=memory_format)
)
loss = nn.MSELoss()
# check two model parameters over 5 iterations
self._test_DDP_niter(
model_gpu,
model_DDP,
input_gpu,
target_gpu,
loss,
local_bs,
rank,
global_bs,
True,
bs_offset,
dist.get_world_size(),
memory_format=memory_format,
)
self._barrier()
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm(self):
group, group_id, rank = self._init_global_test()
world_size = dist.get_world_size()
# DDP does not support replicating BN layers within a process, hence
# testing with one module replica per process
gpus = [rank]
local_bs = 2
bs_offset = int(rank * 2)
global_bs = int(world_size * 2)
self._test_DistributedDataParallel_SyncBatchNorm(
gpu_subset=gpus,
rank=rank,
local_bs=local_bs,
global_bs=global_bs,
offset=bs_offset,
)
# test output_device
self._test_DistributedDataParallel_SyncBatchNorm(
gpu_subset=gpus,
rank=rank,
local_bs=local_bs,
global_bs=global_bs,
offset=bs_offset,
output_device=torch.device("cuda"),
)
# test device_ids
gpus = [torch.device("cuda:" + str(i)) for i in gpus]
self._test_DistributedDataParallel_SyncBatchNorm(
gpu_subset=gpus,
rank=rank,
local_bs=local_bs,
global_bs=global_bs,
offset=bs_offset,
output_device=torch.device("cuda"),
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self):
group, group_id, rank = self._init_global_test()
world_size = dist.get_world_size()
# DDP does not support replicating BN layers within a process, hence
# testing with one module replica per process
gpus = [rank]
local_bs = 2
bs_offset = int(rank * 2)
global_bs = int(world_size * 2)
self._test_DistributedDataParallel_SyncBatchNorm(
gpu_subset=gpus,
rank=rank,
local_bs=local_bs,
global_bs=global_bs,
offset=bs_offset,
affine=False,
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self):
group, group_id, rank = self._init_global_test()
# DDP does not support replicating BN layers within a process, hence
# testing with one module replica per process
gpus = [rank]
model = nn.BatchNorm1d(2)
# single gpu training setup
model_gpu = copy.deepcopy(model)
model_gpu.cuda(gpus[0])
# DDP training setup
model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))
model_DDP.cuda(gpus[0])
model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)
local_bs = len(gpus) * 2
global_bs = dist.get_world_size() * local_bs
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 2)
loss = nn.MSELoss()
# Disable cuDNN so that SyncBatchNorm goes through the native_batch_norm
# kernel; this avoids the numerical issue created by the divergent code path.
with torch.backends.cudnn.flags(False):
# check two model parameters over 5 iterations
self._test_DDP_niter(
model_gpu,
model_DDP,
input_cpu.cuda(gpus[0]),
target.cuda(gpus[0]),
loss,
local_bs,
rank,
global_bs,
True,
)
self._barrier()
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
@require_world_size(2)
def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self):
group, group_id, rank = self._init_global_test()
# DDP does not support replicating BN layers within a process, hence
# testing with one module replica per process
gpus = [rank]
model = nn.BatchNorm1d(2)
# single gpu training setup
model_gpu = copy.deepcopy(model)
model_gpu.cuda(gpus[0])
# DDP training setup
model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))
model_DDP.cuda(gpus[0])
model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)
local_bs = 1
global_bs = dist.get_world_size()
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 2)
loss = nn.MSELoss()
# Disable cuDNN so that SyncBatchNorm goes through the native_batch_norm
# kernel; this avoids the numerical issue created by the divergent code path.
with torch.backends.cudnn.flags(False):
# check two model parameters over 5 iterations
self._test_DDP_niter(
model_gpu,
model_DDP,
input_cpu.cuda(gpus[0]),
target.cuda(gpus[0]),
loss,
local_bs,
rank,
global_bs,
True,
)
self._barrier()
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value(
self,
):
group, group_id, rank = self._init_global_test()
model = nn.parallel.DistributedDataParallel(
ONLY_SBN_NET.cuda(rank), device_ids=[rank]
)
input_var = []
for i in range(dist.get_world_size()):
input_var_rank = torch.cat(
[
torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)),
torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)),
],
dim=1,
)
input_var.append(input_var_rank)
all_input_var = torch.cat(
[
x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1)
for x in input_var
],
dim=1,
).cuda(rank)
for i in range(100):
y = model(input_var[rank].cuda(rank))
y.mean().backward()
running_mean, running_var = (
model.module.running_mean,
model.module.running_var,
)
torch.testing.assert_close(running_mean, all_input_var.mean(1))
torch.testing.assert_close(running_var, all_input_var.var(1))
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self):
group, group_id, rank = self._init_global_test()
# only do single GPU per process
gpus = [rank]
# cpu training setup
model = BN_NET
num_processes = dist.get_world_size()
local_bs = rank + 2
bs_offset = int((rank + 3) * rank / 2)
global_bs = int((num_processes + 3) * num_processes / 2)
self._test_DistributedDataParallel_SyncBatchNorm(
gpu_subset=gpus,
rank=rank,
local_bs=local_bs,
global_bs=global_bs,
offset=bs_offset,
)
def _test_ddp_logging_data(self, is_gpu):
rank = dist.get_rank()
model_DDP = copy.deepcopy(DDP_NET)
if is_gpu:
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP.cuda(rank), device_ids=[rank]
)
else:
model_DDP = nn.parallel.DistributedDataParallel(model_DDP)
# dummy data initialization
local_bs = 2
batch_size, input, target, loss = self._prepare_dummy_data(local_bs)
if is_gpu:
input = input.cuda(rank)
target = target.cuda(rank)
model_DDP._set_ddp_runtime_logging_sample_rate(2)
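# With a sample rate of 2, runtime stats are expected to be collected for the
# first 10 iterations and then on every other iteration, which is what the
# `idx < 10 or idx % 2 == 0` check below asserts.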
for idx in range(20):
offset = rank * local_bs
# DDP training, DDP scatters subsets of input to nodes/GPUs
self._test_DDP_helper(
model_DDP,
input[offset : offset + local_bs],
target[offset : offset + local_bs],
loss,
1,
)
self._model_step_with_zero_grad(model_DDP)
# Verify DDP logging data is sampled as expected.
# If more than 10 iterations have run and this is a sampled
# iteration for measuring runtime stats, the runtime stats
# for this idx-th iteration will not be zeros.
ddp_logging_data = model_DDP._get_ddp_logging_data()
if idx > 0 and (idx < 10 or idx % 2 == 0):
self.assertGreaterEqual(
ddp_logging_data.get("forward_compute_time"), 1
)
self.assertGreaterEqual(
ddp_logging_data.get("backward_compute_time"), 1
)
self.assertGreaterEqual(
ddp_logging_data.get("backward_comm_time"), 1
)
self.assertGreaterEqual(
ddp_logging_data.get("backward_compute_time"),
ddp_logging_data.get("backward_compute_comm_overlap_time"),
)
self.assertGreaterEqual(
ddp_logging_data.get("backward_comm_time"),
ddp_logging_data.get("backward_compute_comm_overlap_time"),
)
self.assertEqual(ddp_logging_data.get("iteration"), idx)
elif idx > 0:
# if the idx-th iteration is not sampled for runtime stats,
# ddp_logging_data.iteration will not be updated to the
# current iteration.
self.assertNotEqual(ddp_logging_data.get("iteration"), idx)
# Shuffle the input so that DDP input is different
input = input[torch.randperm(batch_size)]
return model_DDP
@sandcastle_skip_if(BACKEND == "nccl", "nccl does not support DDP on CPU models")
def test_ddp_logging_data_cpu(self):
def parse_env(var):
return os.environ[var] if var in os.environ else "N/A"
dist.set_debug_level(dist.DebugLevel.INFO)
group, group_id, rank = self._init_global_test()
model_DDP = self._test_ddp_logging_data(is_gpu=False)
ddp_logging_data = model_DDP._get_ddp_logging_data()
self.assertEqual(ddp_logging_data.get("world_size"), dist.get_world_size())
self.assertEqual(ddp_logging_data.get("rank"), dist.get_rank())
self.assertEqual(ddp_logging_data.get("module_name"), "Net")
self.assertEqual(ddp_logging_data.get("device_ids"), "")
# output_device defaults to -1 if it is not set, e.g.
# output_device for CPU training is -1.
self.assertEqual(ddp_logging_data.get("output_device"), -1)
self.assertEqual(ddp_logging_data.get("broadcast_buffers"), 1)
self.assertEqual(ddp_logging_data.get("bucket_cap_bytes"), 25 * 1024 * 1024)
self.assertEqual(ddp_logging_data.get("find_unused_parameters"), 0)
self.assertEqual(ddp_logging_data.get("gradient_as_bucket_view"), 0)
self.assertEqual(
ddp_logging_data.get("backend_name"), dist.get_backend(group_id)
)
self.assertEqual(ddp_logging_data.get("iteration"), 18)
params = list(model_DDP.parameters())
num_params = 0
param_size = 0
params = list(
parameter
for parameter in filter(
lambda parameter: parameter.requires_grad, params
)
)
for p in params:
num_params += 1
param_size += p.numel() * p.element_size()
self.assertEqual(ddp_logging_data.get("dtypes"), "float")
self.assertEqual(
ddp_logging_data.get("total_parameter_size_bytes"), param_size
)
self.assertEqual(ddp_logging_data.get("num_parameter_tensors"), num_params)
self.assertEqual(ddp_logging_data.get("bucket_sizes"), str(param_size))
self.assertEqual(
ddp_logging_data.get("master_port"), parse_env("MASTER_PORT")
)
self.assertEqual(
ddp_logging_data.get("master_addr"), parse_env("MASTER_ADDR")
)
self.assertEqual(
ddp_logging_data.get("torch_distributed_debug"),
parse_env("TORCH_DISTRIBUTED_DEBUG"),
)
self.assertEqual(
ddp_logging_data.get("cuda_visible_devices"),
parse_env("CUDA_VISIBLE_DEVICES"),
)
if ddp_logging_data.get("backend_name") == "gloo":
self.assertEqual(
ddp_logging_data.get("gloo_socket_ifname"),
parse_env("GLOO_SOCKET_IFNAME"),
)
self.assertEqual(
ddp_logging_data.get("gloo_device_transport"),
parse_env("GLOO_DEVICE_TRANSPORT"),
)
default_gloo_threads = 2
self.assertEqual(
ddp_logging_data.get("gloo_num_threads"),
default_gloo_threads,
)
self.assertEqual(ddp_logging_data.get("nccl_socket_ifname"), None)
self.assertEqual(ddp_logging_data.get("nccl_blocking_wait"), None)
self.assertEqual(ddp_logging_data.get("nccl_async_error_handling"), None)
self.assertEqual(ddp_logging_data.get("nccl_debug"), None)
self.assertEqual(ddp_logging_data.get("nccl_nthreads"), None)
self.assertEqual(ddp_logging_data.get("nccl_ib_timeout"), None)
# test runtime logging fields
# Note: DETAIL debug mode logs DDP logging data to stdout and
# thus accesses std::map, which inserts a default-constructed value
# if the key didn't exist.
self.assertEqual(ddp_logging_data.get("unused_parameter_size", 0), 0)
self.assertEqual(ddp_logging_data.get("has_rebuilt_buckets"), 1)
self.assertEqual(
ddp_logging_data.get("rebuilt_bucket_sizes"), str(param_size)
)
grad_ready_order = ddp_logging_data.get("prev_iteration_grad_ready_order_indices")
expected_order = list(reversed([str(x) for x in range(3)]))
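# During backward, gradients generally become ready in reverse order of
# parameter registration, so for this 3-parameter net the recorded order is
# expected to be "2, 1, 0".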
self.assertEqual(grad_ready_order, ", ".join(expected_order))
bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices")
self.assertEqual(bucket_indices, " ".join(expected_order))
# It is hard to test exact latency, but we can check that the latency
# is a valid value within the expected range.
self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1)
self.assertGreaterEqual(
ddp_logging_data.get("avg_backward_compute_time"), 1
)
self.assertGreaterEqual(ddp_logging_data.get("avg_backward_comm_time"), 1)
self.assertGreaterEqual(
ddp_logging_data.get("avg_backward_compute_time"),
ddp_logging_data.get("avg_backward_compute_comm_overlap_time"),
)
self.assertGreaterEqual(
ddp_logging_data.get("avg_backward_comm_time"),
ddp_logging_data.get("avg_backward_compute_comm_overlap_time"),
)
# Test host-side times are roughly in the order that we expect
fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start")
bwd_comp_start_host_side_time = ddp_logging_data.get("backward_compute_time_start")
bwd_comp_end_host_side_time = ddp_logging_data.get("backward_compute_time_end")
bwd_comm_start_host_side_time = ddp_logging_data.get("backward_comm_time_start")
bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end")
self.assertGreaterEqual(bwd_comm_end_host_side_time, bwd_comm_start_host_side_time)
self.assertGreaterEqual(bwd_comm_start_host_side_time, bwd_comp_start_host_side_time)
self.assertGreaterEqual(bwd_comp_end_host_side_time, bwd_comp_start_host_side_time)
self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time)
# test larger net with mixed data types, verify multiple bucket sizes
model = LargeNet()
model.float()
model.fc1.double()
model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5)
ddp_logging_data = model_DDP._get_ddp_logging_data()
params = list(model_DDP.parameters())
self.assertEqual(
ddp_logging_data.get("bucket_cap_bytes"), int(1.5 * 1024 * 1024)
)
bucket_sizes = [
params[1].numel() * params[1].element_size(),
params[0].numel() * params[0].element_size(),
]
self.assertEqual(
ddp_logging_data.get("bucket_sizes"),
", ".join(str(x) for x in bucket_sizes),
)
self.assertEqual(ddp_logging_data.get("dtypes"), "double, float")
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_no_gpu
def test_ddp_logging_data_gpu(self):
group, group_id, rank = self._init_global_test()
model_DDP = self._test_ddp_logging_data(is_gpu=True)
ddp_logging_data = model_DDP._get_ddp_logging_data()
self.assertEqual(ddp_logging_data.get("device_ids"), str(rank))
self.assertEqual(ddp_logging_data.get("output_device"), rank)
grad_ready_order = ddp_logging_data.get("prev_iteration_grad_ready_order_indices")
expected_order = list(reversed([str(x) for x in range(3)]))
self.assertEqual(grad_ready_order, ", ".join(expected_order))
bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices")
self.assertEqual(bucket_indices, " ".join(expected_order))
# test runtime logging fields
# It is hard to test exact latency, but we can check that the latency
# is a valid value within the expected range.
self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1)
self.assertGreaterEqual(
ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), 1
)
self.assertGreaterEqual(
ddp_logging_data.get("avg_backward_compute_time"),
ddp_logging_data.get("avg_backward_compute_comm_overlap_time"),
)
self.assertGreaterEqual(
ddp_logging_data.get("avg_backward_comm_time"),
ddp_logging_data.get("avg_backward_compute_comm_overlap_time"),
)
# Test host-side times are roughly in the order that we expect
fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start")
bwd_comp_start_host_side_time = ddp_logging_data.get("backward_compute_time_start")
bwd_comp_end_host_side_time = ddp_logging_data.get("backward_compute_time_end")
bwd_comm_start_host_side_time = ddp_logging_data.get("backward_comm_time_start")
bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end")
self.assertGreaterEqual(bwd_comm_end_host_side_time, bwd_comm_start_host_side_time)
self.assertGreaterEqual(bwd_comm_start_host_side_time, bwd_comp_start_host_side_time)
self.assertGreaterEqual(bwd_comp_end_host_side_time, bwd_comp_start_host_side_time)
self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time)
@sandcastle_skip_if(BACKEND == "nccl", "nccl does not support DDP on CPU models")
def test_static_graph_api_cpu(self):
model_DDP = nn.parallel.DistributedDataParallel(DDP_NET)
expected_err = "should be called before training loop starts"
with self.assertRaisesRegex(RuntimeError, expected_err):
local_bs = 2
batch_size, input, target, loss = self._prepare_dummy_data(local_bs)
offset = dist.get_rank() * local_bs
# DDP training, DDP scatters subsets of input to nodes/GPUs
self._test_DDP_helper(
model_DDP,
input[offset : offset + local_bs],
target[offset : offset + local_bs],
loss,
1,
)
model_DDP._set_static_graph()
# Verify error was logged in ddp_logging_data.
verify_ddp_error_logged(model_DDP, expected_err)
@skipIfNoTorchVision
def test_SyncBatchNorm_process_group(self):
# When using `convert_sync_batchnorm` to convert an `nn.Module`,
# the `process_group` needs to be passed down recursively when the `SyncBatchNorm`
# is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models).
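# The test below performs exactly this conversion; in sketch form
# (hypothetical `model` and `pg`):
#
#     pg = torch.distributed.new_group([0])
#     sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model, pg)
#     # every nested BatchNorm layer now carries `pg` as its process_group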
process_ids = 0
process_group = torch.distributed.new_group([process_ids])
res50_model = torchvision.models.resnet50()
res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm(
copy.deepcopy(res50_model), process_group
)
process_group_sync = res50_model_sync.layer1[0].bn1.process_group
self.assertEqual(process_group_sync, process_group)
def _run_reduction_test(
self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None
):
if reduction_fn != dist.all_reduce and dst is None:
raise ValueError(f"Reduction fn {reduction_fn} must specify dst!")
if dst is not None:
reduction_fn(tensor, dst, op)
# Only destination rank tensor is expected to have final result.
if dist.get_rank() == dst:
self.assertEqual(tensor, expected_tensor)
else:
reduction_fn(tensor, op)
self.assertEqual(tensor, expected_tensor)
@require_backend({"nccl"})
@require_backends_available({"nccl"})
@skip_if_lt_x_gpu(2)
def test_nccl_backend_bool_allreduce(self):
torch.cuda.set_device(self.rank)
# Run all_reduce with PRODUCT
element = self.rank % 2 == 0
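# For bool tensors, PRODUCT/MIN behave like a logical AND (any False yields
# False) and SUM/MAX behave like a logical OR (any True yields True).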
for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:
input_tensor = torch.tensor([element, element]).to(self.rank)
self._run_reduction_test(
input_tensor, torch.tensor([False, False]).to(self.rank), op
)
# Ensure that all ranks contributing True (cast to 1) result in the
# correct reduction.
input_tensor = torch.tensor([True, True]).to(self.rank)
expected_tensor = input_tensor.clone()
self._run_reduction_test(input_tensor, expected_tensor, op)
# Run all_reduce with SUM
for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:
input_tensor = torch.tensor([element, element]).to(self.rank)
self._run_reduction_test(
input_tensor, torch.tensor([True, True]).to(self.rank), op
)
# TODO: NCCL backend does not work correctly for bitwise reduction ops
# (see https://github.com/pytorch/pytorch/issues/41362). Add tests for
# these once it is supported.
@require_backend({"nccl"})
@require_backends_available({"nccl"})
@skip_if_lt_x_gpu(2)
def test_nccl_backend_bool_allgather(self):
torch.cuda.set_device(self.rank)
inp = {0: [True, True], 1: [False, True]}
input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)
# Preserve a copy of the tensor to compare against after allgather.
input_tensor_copy = input_tensor.clone()
tensor_list = [
torch.tensor([False, False]).to(self.rank)
for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, input_tensor)
self.assertEqual(len(tensor_list), dist.get_world_size())
for i, t in enumerate(tensor_list):
expected = torch.tensor(inp[i % 2]).to(self.rank)
self.assertEqual(t, expected)
# Ensure that the input tensor is not modified, since this collective
# does not modify its input.
self.assertEqual(input_tensor_copy, input_tensor)
@require_backend({"nccl"})
@require_backends_available({"nccl"})
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_nccl_backend_bool_reduce(self):
torch.cuda.set_device(self.rank)
inp = {0: [True, True], 1: [False, False]}
# Run reduce() with product op
for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:
input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)
expected = torch.tensor([False, False]).to(self.rank)
self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)
# Ensure that all ranks contributing True (cast to 1) result in the
# correct reduction.
input_tensor = torch.tensor([True, True]).to(self.rank)
expected_tensor = input_tensor.clone()
self._run_reduction_test(
input_tensor, expected_tensor, op, dist.reduce, dst=0
)
for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:
input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)
expected = (
torch.tensor([True, True]).to(self.rank)
if self.rank == 0
else input_tensor.clone()
)
self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)
@require_backend({"nccl"})
@require_backends_available({"nccl"})
@skip_if_lt_x_gpu(2)
def test_nccl_backend_bool_broadcast(self):
tensor_size = 10
bcast_tensor = torch.tensor(
[
(random.random() < 0.5 if self.rank == 0 else False)
for _ in range(tensor_size)
]
).to(self.rank)
dist.broadcast(bcast_tensor, src=0)
# Now allgather and ensure the tensors are equal.
tensor_list = [
torch.tensor([False for _ in range(tensor_size)]).to(self.rank)
for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, bcast_tensor)
expected = tensor_list[0]
for tensor in tensor_list[1:]:
self.assertEqual(tensor, expected)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_DistributedSampler_padding(self):
# Tests padding of distributed sampler.
world_size = dist.get_world_size()
# Simulates a typical dataset size
dataset_size = 100 + world_size + 1
dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)]
# Simulates the 'tiny' dataset size
dataset_tiny_size = max(world_size // 2 - 1, 1)
dataset_tiny = [
torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size)
]
# Specifying drop_last=True will cause the tail of the data to be dropped.
dist_sampler = DistributedSampler(dataset=dataset, drop_last=True)
local_num_samples, local_dataset_size = (
dist_sampler.num_samples,
dist_sampler.total_size,
)
# The effective dataset size should be the greatest integer that is <=
# dataset_size that is divisible by the world_size. This is to ensure each
# rank processes the same number of samples.
effective_dataset_size = (
math.ceil((dataset_size - world_size) / world_size)
if dataset_size % world_size != 0
else dataset_size / world_size
)
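# Worked example (assuming world_size == 4): dataset_size == 105, which is not
# divisible by 4, so each rank gets ceil((105 - 4) / 4) == 26 samples and the
# last 105 - 26 * 4 == 1 sample is dropped.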
self.assertEqual(local_num_samples, effective_dataset_size)
self.assertEqual(local_dataset_size, local_num_samples * world_size)
indices_list = list(iter(dist_sampler))
self.assertEqual(len(indices_list), local_num_samples)
def validate_global_samples(local_num_samples):
# Ensure that each rank processes the same number of samples.
world_samples = [
torch.LongTensor([0]).to(self.rank) for _ in range(world_size)
]
dist.all_gather(
world_samples, torch.tensor([local_num_samples]).to(self.rank)
)
world_samples = [sample.item() for sample in world_samples]
self.assertEqual(len(set(world_samples)), 1)
validate_global_samples(local_num_samples)
# drop_last=False is the default and will add additional indices to be sampled,
# increasing the effective dataset size.
dist_sampler_added_samples = DistributedSampler(dataset=dataset)
local_num_samples, local_dataset_size = (
dist_sampler_added_samples.num_samples,
dist_sampler_added_samples.total_size,
)
# The effective dataset size is the smallest integer that is >= dataset_size
# and divisible by the world size.
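# Continuing the world_size == 4 example: ceil(105 / 4) == 27 samples per rank,
# so 27 * 4 - 105 == 3 indices are repeated as padding.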
self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size))
self.assertEqual(local_dataset_size, local_num_samples * world_size)
indices_list = list(iter(dist_sampler_added_samples))
self.assertEqual(len(indices_list), local_num_samples)
# Ensure that each rank processes the same number of samples.
validate_global_samples(local_num_samples)
# Ensure additional samples are padded in even when
# an extremely small dataset is given.
dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny)
local_num_samples, local_dataset_size = (
dist_sampler_added_samples_tiny.num_samples,
dist_sampler_added_samples_tiny.total_size,
)
self.assertEqual(
local_num_samples, math.ceil(dataset_tiny_size / world_size)
)
self.assertEqual(local_dataset_size, local_num_samples * world_size)
indices_list = list(iter(dist_sampler_added_samples_tiny))
self.assertEqual(len(indices_list), local_num_samples)
validate_global_samples(local_num_samples)
def _test_allgather_object(self, subgroup=None):
# Only set device for NCCL backend since it must use GPUs.
gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy()
backend = os.environ["BACKEND"]
if backend == "nccl":
# Case where rank != GPU device.
next_rank = (self.rank + 1) % int(self.world_size)
torch.cuda.set_device(next_rank)
# If GPU test, add object with GPU tensor
if backend == "nccl":
gather_objects.append(Foo(torch.randn(3, 3, device=0)))
output_gathered = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(
output_gathered,
gather_objects[self.rank % len(gather_objects)],
group=subgroup,
)
for i, val in enumerate(output_gathered):
expected = gather_objects[i % len(gather_objects)]
self.assertEqual(val, expected)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_n_gpus_for_nccl_backend(
int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"]
)
@with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
def test_all_gather_object_default_pg(self):
return self._test_allgather_object()
@require_backend(DistTestCases.backend_feature["gpu"])
@require_n_gpus_for_nccl_backend(
int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"]
)
@with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"])
def test_all_gather_object_subgroup(self):
default = _get_default_group()
backend = dist.get_backend(default)
subgroup = dist.new_group(backend=backend)
return self._test_allgather_object(subgroup=subgroup)
def _test_gather_object(self, pg=None):
# Ensure stateful objects can be gathered
gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy()
my_rank = dist.get_rank(pg)
backend = os.environ["BACKEND"]
if backend == "nccl":
# Case where rank != GPU device.
next_rank = (self.rank + 1) % int(self.world_size)
torch.cuda.set_device(next_rank)
# If GPU test, add object with GPU tensor
if backend == "nccl":
gather_objects.append(Foo(torch.randn(3, 3, device=my_rank)))
output_gathered = [None for _ in range(dist.get_world_size(pg))]
gather_on_rank = 0
dist.gather_object(
gather_objects[self.rank % len(gather_objects)],
object_gather_list=output_gathered
if my_rank == gather_on_rank
else None,
dst=gather_on_rank,
group=pg
)
if my_rank != gather_on_rank:
self.assertEqual(
output_gathered, [None for _ in range(dist.get_world_size())]
)
else:
for i, val in enumerate(output_gathered):
expected = gather_objects[i % len(gather_objects)]
self.assertEqual(val, expected)
# Validate errors when objects can't be pickled.
class Bar:
pass
b = Bar()
gather_objects = [b for _ in range(dist.get_world_size())]
with self.assertRaisesRegex(AttributeError, "Can't pickle local object"):
dist.all_gather_object(
[None for _ in range(dist.get_world_size())],
gather_objects[self.rank],
group=pg
)
@require_backend(DistTestCases.backend_feature["gpu"])
@with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"])
def test_gather_object(self):
return self._test_gather_object()
@require_backend(DistTestCases.backend_feature["gpu"])
@with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"])
def test_gather_object_subgroup(self):
default = _get_default_group()
backend = dist.get_backend(default)
subgroup = dist.new_group(backend=backend)
return self._test_gather_object(subgroup)
def validate_net_equivalence(self, net):
# Helper to validate synchronization of nets across ranks.
net_module_states = list(net.module.state_dict().values())
# Check that all tensors in module's state_dict() are equal.
for t in net_module_states:
tensor_list = [
torch.zeros_like(t) for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, t)
for tensor in tensor_list:
self.assertEqual(tensor, t)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_sync_module_states(self):
# Test that after calling _sync_module_states, models across ranks
# are the same and are equal to the model on the input rank.
dim = 2
rank = self.rank
rank_to_broadcast = 1
# Seed to ensure that ranks are initialized with different initial models.
torch.manual_seed(rank)
model = nn.Linear(dim, dim, bias=False)
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1
)
new_model = nn.Linear(dim, dim, bias=False).cuda(rank)
net.module = copy.deepcopy(new_model)
# Assert params are different
net_module_states = list(net.module.state_dict().values())
for t in net_module_states:
tensor_list = [
torch.zeros_like(t) for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, t)
for i, tensor in enumerate(tensor_list):
if i == rank:
self.assertEqual(t, tensor)
else:
# tensor from another rank should be different.
self.assertNotEqual(t, tensor)
_sync_module_states(
module=net.module,
process_group=net.process_group,
broadcast_bucket_size=net.broadcast_bucket_size,
src=rank_to_broadcast,
params_and_buffers_to_ignore=net.parameters_to_ignore
)
# Now all model params should be the same.
self.validate_net_equivalence(net)
# Since the network params were broadcast from rank_to_broadcast, validate that
# they are the same as new_model on rank_to_broadcast.
if rank == rank_to_broadcast:
expected_states = new_model.state_dict().values()
for t, expected in zip(net_module_states, expected_states):
self.assertEqual(t, expected)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_grad_div_uneven_inputs(self):
# Test gradient division during training with join() API. If
# divide_by_initial_world_size=False, we scale by the effective world
# size when allreducing grads.
dim = 5
batch = 1
grad_scale = 50
rank = self.rank
model = nn.Linear(dim, dim, bias=False)
inp = torch.ones(batch, dim, device=self.rank) * grad_scale
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1
)
n_iters = 3
if self.rank > 0:
n_iters += 2
with net.join(divide_by_initial_world_size=False):
for _ in range(n_iters):
loss = net(inp).sum()
loss.backward()
# The grad is always expected_grad, since we divide by the number
# of currently active processes and inactive processes contribute
# zero gradient. If we kept dividing by static initial world
# size as processes leave, the grad would be smaller.
expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale
param = list(net.parameters())[0]
self.assertEqual(expected_grad, param.grad)
# Avoid accumulating grads so that it's the same every iteration
net.zero_grad()
torch.cuda.synchronize(device=self.rank)
# If divide_by_initial_world_size=True (default), we always scale grads
# by the initial world_size.
with net.join(divide_by_initial_world_size=True):
for i in range(n_iters):
loss = net(inp).sum()
loss.backward()
effective_ws = dist.get_world_size()
if i >= 3:
effective_ws -= 1
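# Worked example: with world_size == 2 and one rank already joined (i >= 3),
# effective_ws == 1, so the expected grad is grad_scale * 1 / 2 rather than
# grad_scale.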
expected_grad = (
torch.ones(dim, dim, device=self.rank)
* grad_scale
* effective_ws
) / dist.get_world_size()
param = list(net.parameters())[0]
self.assertEqual(expected_grad, param.grad)
# Avoid accumulating grad so that it's the same every iteration.
net.zero_grad()
torch.cuda.synchronize(device=self.rank)
def _test_ddp_profiling(self, profiler_ctx):
batch = 3
dim = 10
num_iters = 6
torch.cuda.set_device(self.rank)
model = nn.Linear(dim, dim, bias=False)
inp = torch.rand(batch, dim, device=self.rank)
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank),
device_ids=[self.rank],
)
profiler_ctx_copy = copy.deepcopy(profiler_ctx)
with profiler_ctx as prof:
for i in range(num_iters):
loss = net(inp).sum()
loss.backward()
all_reduce_event_name = f"{dist.get_backend()}:all_reduce"
events = get_profiling_event(all_reduce_event_name, prof)
event_count = sum(e.count for e in events)
self.assertEqual(event_count, num_iters)
for event in events:
self.assertTrue(event.is_async)
self.assertEqual(event.name, all_reduce_event_name)
broadcast_event_name = f"{dist.get_backend()}:broadcast"
broadcast_events = get_profiling_event(broadcast_event_name, prof)
event_count = sum(e.count for e in broadcast_events)
# Broadcast is called during rebuild_buckets
self.assertGreaterEqual(event_count, 1)
for event in broadcast_events:
self.assertEqual(event.name, broadcast_event_name)
# Run DDP with profiling for a few iterations, then enable profiling
# for a single pass, and ensure it is recorded. This tests that the
# thread local state is correctly updated.
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=True,
)
for i in range(3):
loss = net(inp).sum()
loss.backward()
# Now enable the profiler.
with profiler_ctx_copy as prof:
loss = net(inp).sum()
loss.backward()
events = get_profiling_event(all_reduce_event_name, prof)
self.assertGreaterEqual(len(events), 1)
self.assertGreaterEqual(events[0].count, 1)
self.assertEqual(events[0].name, all_reduce_event_name)
for event in events:
self.assertTrue(event.is_async)
# Ensure searching unused parameters was profiled
events = get_profiling_event("search_unused_parameters", prof)
self.assertEqual(len(events), 1)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_profiling_autograd_profiler(self):
autograd_profiler_ctx = torch.autograd.profiler.profile()
return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(IS_FBCODE, "Kineto in fbcode code causes hang")
@sandcastle_skip_if(
IS_MACOS or IS_WINDOWS,
"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124",
)
@skip_if_rocm
def test_ddp_profiling_torch_profiler(self):
cpu_act = torch.profiler.ProfilerActivity.CPU
cuda_act = torch.profiler.ProfilerActivity.CUDA
torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act])
self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_join_model_equivalence(self):
# Verifies equivalence with model training locally and with DDP under
# the join context manager.
batch = 3
dim = 10
learning_rate = 0.03
model = nn.Linear(dim, dim, bias=False)
inp = torch.rand(batch, dim, device=self.rank)
local_model = copy.deepcopy(model)
local_model = local_model.cuda(self.rank)
rank_to_iter_mapping = {
rank: 2 * (rank + 1) for rank in range(dist.get_world_size())
}
# run local model
local_iters = sum(rank_to_iter_mapping.values())
local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate)
for _ in range(local_iters):
local_optim.zero_grad()
out = local_model(inp)
loss = out.sum()
loss.backward()
local_optim.step()
# run DDP model with join API
num_iters = rank_to_iter_mapping[self.rank]
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank), device_ids=[self.rank]
)
ddp_optim = torch.optim.SGD(
model.parameters(), lr=learning_rate * dist.get_world_size()
)
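# The lr is scaled by world_size because DDP divides the summed gradients by
# the initial world size; for this fixed-input linear model the per-step
# gradient is constant, so across all ranks' iterations the DDP updates add up
# to the same total as the local model's sum(rank_to_iter_mapping.values()) steps.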
with net.join():
for i in range(num_iters):
ddp_optim.zero_grad()
out = net(inp)
loss = out.sum()
loss.backward()
torch.cuda.synchronize(device=self.rank)
ddp_optim.step()
# Validate model state dicts are equal
for (_, local_tensor), (_, dist_tensor) in zip(
local_model.state_dict().items(), net.module.state_dict().items()
):
self.assertEqual(local_tensor, dist_tensor)
def _run_uneven_inputs_test(
self,
test_case,
iteration_mapping,
find_unused_params,
):
model = test_case.model
inp = test_case.inp
rank = self.rank
sync_interval = test_case.sync_interval
torch.cuda.set_device(rank)
# Ensure all outstanding GPU work is complete so this test runs independently.
dist.barrier()
# Bucket_cap_mb is intentionally low to test allreduce scheduling when
# there are many buckets.
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(rank),
device_ids=[rank],
bucket_cap_mb=1,
find_unused_parameters=find_unused_params,
)
# Register hook if specified
if test_case.hook is not None:
net.register_comm_hook(test_case.state, test_case.hook)
print(f"registered hook {test_case.hook}")
# Determine num iters for this rank via the passed in mapping.
num_iters = iteration_mapping[rank]
# If we throw when earliest rank terminates, we should ensure
# that we iterate for that minimum number of times.
num_iters_tensor = torch.tensor(
[num_iters], device=torch.cuda.current_device()
)
dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN)
min_num_iters = num_iters_tensor.item()
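# min_num_iters is the global minimum across ranks, i.e. the iteration count
# at which the earliest-terminating rank runs out of inputs.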
total_iters = 0
if test_case.throw_on_early_termination:
if min_num_iters == num_iters:
# Early termination rank(s)
exception_ctx = self.assertRaisesRegex(
RuntimeError, f"Rank {self.rank} exhausted all inputs"
)
else:
# Non early termination rank
exception_ctx = self.assertRaisesRegex(
RuntimeError,
"Detected at least one rank that exhausted inputs.",
)
else:
exception_ctx = suppress()
with exception_ctx:
with net.join(
throw_on_early_termination=test_case.throw_on_early_termination
):
for i in range(num_iters):
# Use net.no_sync() to skip grad synchronization except on every
# sync_interval-th iteration.
if i % sync_interval != 0:
context = net.no_sync()
else:
context = suppress()
with context:
if isinstance(inp, tuple):
loss = net(*inp).sum()
else:
loss = net(inp).sum()
loss.backward()
self._model_step(net)
# Ensure completion of GPU kernels (including allreduce). If the
# join API is not properly implemented, then this should hang
# since the allreduce will hang.
torch.cuda.synchronize(device=rank)
total_iters += 1
if test_case.throw_on_early_termination:
# Ensure we iterated min_num_iters times.
self.assertEqual(total_iters, min_num_iters)
else:
# Ensure we iterated at least min_num_iters times.
self.assertGreaterEqual(total_iters, min_num_iters)
# Ensure completion of all GPU kernels.
torch.cuda.synchronize(device=rank)
# When throwing on early rank termination, we do not
# broadcast model state from an authoritative rank. All models
# should already be in sync.
if not test_case.throw_on_early_termination:
self.assertTrue(net._authoritative_rank)
# All ranks should have agreed on the same authoritative_rank!
final_rank_tensor = torch.tensor(
[net._authoritative_rank], device=self.rank
)
tensor_list = [
torch.zeros_like(final_rank_tensor)
for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, final_rank_tensor)
max_rank = dist.get_world_size() - 1
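# The highest rank always runs at least as many iterations as any other rank,
# so it is among the last ranks to join; DDP selects the largest such rank as
# the authoritative rank, hence max_rank.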
self.assertSetEqual(
{max_rank}, set(tensor.item() for tensor in tensor_list)
)
# Ensure that all models are the same across ranks after all have joined.
self.validate_net_equivalence(net)
# Ensure that running with DDP uneven inputs was logged.
ddp_logging_data = net._get_ddp_logging_data()
self.assertTrue(ddp_logging_data.get("join_uneven_inputs"))
dist.barrier()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_uneven_inputs_stop_iteration_sync_bn(self):
# Tests that uneven inputs join handler correctly throws StopIteration
# for models with SyncBN or general collective comm when
# throw_on_early_termination=True.
class ModelWithComm(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(2, 40, bias=False)
def forward(self, x):
x = self.lin(x)
dist.all_reduce(x)
return x
torch.cuda.set_device(self.rank)
model_bn = BN_NET
model_bn = nn.SyncBatchNorm.convert_sync_batchnorm(
copy.deepcopy(model_bn)
).cuda(self.rank)
comm_model = ModelWithComm().cuda(self.rank)
model_input = torch.randn(10, 2).cuda(torch.cuda.current_device())
for model in [model_bn, comm_model]:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.rank],
)
min_num_iters = 5
if self.rank != 0:
# Early termination rank(s)
num_iters = min_num_iters
exception_ctx = self.assertRaisesRegex(
RuntimeError, f"Rank {self.rank} exhausted all inputs"
)
else:
# Non early termination rank
num_iters = min_num_iters * 2
exception_ctx = self.assertRaisesRegex(
RuntimeError,
"Detected at least one rank that exhausted inputs.",
)
n = 0
with exception_ctx:
with model.join(throw_on_early_termination=True):
for i in range(num_iters):
loss = model(model_input).sum()
loss.backward()
self._model_step(model)
n += 1
self.assertEqual(n, min_num_iters)
# Verify model equivalence
self.validate_net_equivalence(model)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_uneven_inputs(self):
dim = 1000
batch = 1
# Create a variety of models to run uneven input tests on.
large_model = nn.Sequential(
nn.Conv2d(1, 20, 5),
nn.ReLU(),
nn.Conv2d(20, 32, 5),
nn.ReLU(),
nn.Conv2d(32, 256, 5),
nn.ReLU(),
)
small_model = nn.Linear(dim, dim, bias=False)
bn_net = BatchNormNet()
class UnusedParamModule(nn.Module):
def __init__(self, unused_params_rank):
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.unused_params_rank = unused_params_rank
def task_parameters(self):
return (self.t0.p, self.t1.p)
def forward(self, x, rank):
return (
self.t1(self.t0(x))
if rank != self.unused_params_rank
else self.t1(x)
)
unjoined_rank_with_unused_params_model = UnusedParamModule(1)
joined_rank_with_unused_params_model = UnusedParamModule(0)
rank = self.rank
models_to_test = [
# Network with batchnorm
DDPUnevenTestInput(
name="batch_norm_net",
model=bn_net,
inp=torch.ones(batch, 2, device=rank),
sync_interval=1,
),
DDPUnevenTestInput(
name="large_conv_model",
model=large_model,
inp=torch.ones(batch, batch, dim, dim, device=rank),
sync_interval=1,
),
DDPUnevenTestInput(
name="small_model",
model=small_model,
inp=torch.ones(batch, dim, device=rank),
sync_interval=1,
),
# Unused parameter test where rank that does not join early has unused params
DDPUnevenTestInput(
name="unjoined_rank_with_unused_params_model",
model=unjoined_rank_with_unused_params_model,
inp=(torch.ones(batch, 2, device=rank), rank),
sync_interval=1,
),
# Unused parameter test where rank that does join early has unused params
DDPUnevenTestInput(
name="joined_rank_with_unused_params_model",
model=joined_rank_with_unused_params_model,
inp=(torch.ones(batch, 2, device=rank), rank),
sync_interval=1,
),
]
# Test models that have hook installed.
models_with_hook = [
DDPUnevenTestInput(
name="small_model_allreduce_hook",
model=small_model,
hook=default.allreduce_hook,
state=None,
inp=torch.ones(batch, dim, device=rank),
sync_interval=1,
),
DDPUnevenTestInput(
name="small_model_power_sgd_hook",
model=small_model,
hook=powerSGD.powerSGD_hook,
state=powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
# Config so that powerSGD runs immediately instead of
# allreduce.
start_powerSGD_iter=1,
warm_start=False,
use_error_feedback=False,
),
inp=torch.ones(batch, dim, device=rank),
sync_interval=1,
),
]
models_to_test.extend(models_with_hook)
# Add resnet model if we have torchvision installed.
if HAS_TORCHVISION:
resnet_model = torchvision.models.resnet50()
models_to_test.append(
DDPUnevenTestInput(
name="resnet_model",
model=resnet_model,
inp=torch.ones(1, 3, 1000, 1000),
sync_interval=1,
)
)
# Test with no_sync every 2, 3, 4, ... iterations.
models_with_sync = []
for i, test_input in enumerate(models_to_test):
models_with_sync.append(
DDPUnevenTestInput(
name=test_input.name,
model=test_input.model,
inp=test_input.inp,
sync_interval=i + 2,
)
)
throw_on_early_term_tests = []
for test_input in models_to_test:
throw_on_early_term_tests.append(
DDPUnevenTestInput(
name=test_input.name,
model=test_input.model,
inp=test_input.inp,
sync_interval=test_input.sync_interval,
throw_on_early_termination=True,
)
)
models_to_test.extend(models_with_sync)
models_to_test.extend(throw_on_early_term_tests)
# 0 iteration tests for when one process does not train model at all, so
# we must shadow the broadcast calls made when rebuilding buckets.
baseline_num_iters = [0, 5]
iteration_offsets = [2, 3, 10]
num_uneven_ranks = [1]
if dist.get_world_size() > 2:
num_uneven_ranks.append(2)
iteration_mappings = []
# Generate rank : num_iters mappings for various uneven input scenarios.
# This includes cases where rank 0 joins early and all other ranks join
# later, and scenarios where multiple ranks join early, but at different
# iterations, and later ranks join later.
for num_early_join_ranks in num_uneven_ranks:
for baseline_iter in baseline_num_iters:
for offset in iteration_offsets:
mapping = {
rank: baseline_iter
for rank in range(0, num_early_join_ranks)
}
# if num_early_join_ranks > 1, ranks > 0 that will join early
# iterate offset//2 more times than rank 0, to test nodes
# depleting inputs at different times.
if num_early_join_ranks > 1:
for rank in mapping.keys():
if rank > 0:
mapping[rank] += offset // 2
mapping.update(
{
rank: baseline_iter + offset
for rank in range(
num_early_join_ranks, dist.get_world_size()
)
}
)
iteration_mappings.append(mapping)
for (test_case, iteration_mapping) in itertools.product(
models_to_test, iteration_mappings
):
if self.rank == 0:
print(
f"""Running test: {test_case.name} sync interval
{test_case.sync_interval} with iteration mapping
{iteration_mapping}"""
)
self._run_uneven_inputs_test(
test_case,
iteration_mapping,
find_unused_params=("unused_params_model" in test_case.name),
)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_uneven_input_join_disable(self):
# Tests that if net.join() with enable=False is specified, DDP works as
# expected with even inputs.
torch.manual_seed(self.rank)
net = torch.nn.parallel.DistributedDataParallel(
torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank]
)
inp = torch.ones(1) * self.rank
n_iters = 5
world_size = dist.get_world_size()
with net.join(enable=False):
for _ in range(n_iters):
# Clear grads
grad = net.module.weight.grad
if grad is not None:
grad.requires_grad_(False)
grad.zero_()
out = net(inp)
loss = out.sum()
loss.backward()
# Validate gradients to ensure that we divide by the correct
# world_size when join mode is disabled.
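# net.module is Linear(1, 1) and each rank feeds inp = rank, so the local
# d(loss)/d(weight) is rank; DDP averages grads across ranks, giving
# sum(range(world_size)) / world_size.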
expected_grad = sum(i for i in range(world_size)) / world_size
self.assertEqual(net.module.weight.grad.item(), expected_grad)
join_config = net._join_config
self.assertFalse(join_config.enable)
self.validate_net_equivalence(net)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_uneven_input_exception(self):
# Tests that exceptions during training are correctly propagated by the
# context manager.
error_str = "Intentional error"
class ExceptionModule(nn.Module):
def __init__(self):
super().__init__()
self.param = nn.Parameter(torch.ones(1, requires_grad=True))
def forward(self, _):
raise ValueError(error_str)
exception_module = ExceptionModule()
net = torch.nn.parallel.DistributedDataParallel(
exception_module.cuda(self.rank), device_ids=[self.rank]
)
inp = torch.ones(1)
with self.assertRaisesRegex(ValueError, error_str):
with net.join():
out = net(inp)
loss = out.sum()
loss.backward()
def _test_broadcast_object_list(self, group=None):
gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy()
# Only set device for NCCL backend since it must use GPUs.
# Case where rank != GPU device.
next_rank = (self.rank + 1) % int(self.world_size)
backend = os.environ["BACKEND"]
if backend == "nccl":
torch.cuda.set_device(next_rank)
src_rank = 0
# If GPU test, add object with GPU tensor
if backend == "nccl":
gather_objects.append(Foo(torch.randn(3, 3, device=0)))
if IS_FBCODE:
# Create a tensor with > 2^31 bytes of storage.
# Only on FBCODE, as this test OOMs in OSS.
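# 3 * 178956971 float32 elements = 536870913 * 4 bytes = 2147483652 bytes,
# just above 2**31.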
gather_objects.append(Foo(torch.randn(3, 178956971)))
objects = (
gather_objects
if self.rank == src_rank
else [None for _ in gather_objects]
)
# Single object test with device specified. Backend="gloo", device=cpu
if backend != "nccl":
single_obj_list = [objects[0]]
if self.rank != src_rank:
self.assertNotEqual(
single_obj_list[0], gather_objects[0]
)
dist.broadcast_object_list(
single_obj_list, src=0, group=group, device=torch.device("cpu")
)
self.assertEqual(single_obj_list[0], gather_objects[0])
# Single object test with device specified. Backend="gloo", device=current_device+1
# The test only runs when the GPU count matches the world size, to avoid the
# case where the backend is gloo but there are not enough GPU devices.
if backend != "nccl" and torch.cuda.device_count() == int(self.world_size):
single_obj_list = [objects[0]]
if self.rank != src_rank:
self.assertNotEqual(
single_obj_list[0], gather_objects[0]
)
dist.broadcast_object_list(
single_obj_list, src=0, group=group, device=torch.device(next_rank)
)
self.assertEqual(single_obj_list[0], gather_objects[0])
# Single object test with device specified. Backend="nccl", device=current_device+1
if backend == "nccl" and torch.cuda.device_count() == int(self.world_size):
single_obj_list = [objects[0]]
if self.rank != src_rank:
self.assertNotEqual(
single_obj_list[0], gather_objects[0]
)
dist.broadcast_object_list(
single_obj_list, src=0, group=group, device=torch.device(next_rank)
)
self.assertEqual(single_obj_list[0], gather_objects[0])
# Single object test: backward compatibility with device unspecified
single_obj_list = [objects[0]]
if self.rank != src_rank:
self.assertNotEqual(single_obj_list[0], gather_objects[0])
dist.broadcast_object_list(single_obj_list, src=0, group=group)
self.assertEqual(single_obj_list[0], gather_objects[0])
# Multiple input objects test
if self.rank != src_rank:
self.assertNotEqual(objects, gather_objects)
dist.broadcast_object_list(objects, src=0, group=group)
self.assertEqual(objects, gather_objects)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_n_gpus_for_nccl_backend(
int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"]
)
@with_dist_debug_levels(levels=["DETAIL"])
def test_broadcast_object_list(self):
return self._test_broadcast_object_list()
@require_backend(DistTestCases.backend_feature["gpu"])
@require_n_gpus_for_nccl_backend(
int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"]
)
@with_dist_debug_levels(levels=["DETAIL"])
def _test_broadcast_object_list_subgroup(self):
default = _get_default_group()
backend = dist.get_backend(default)
subgroup = dist.new_group(backend=backend)
return self._test_broadcast_object_list(subgroup)
def _test_ddp_ignore_params_arg(self, static_graph=False):
class TestModel(nn.Module):
def __init__(self, rank):
self.rank = rank
super(TestModel, self).__init__()
self.fc1 = nn.Linear(1, 1, bias=False)
# Proxy that will be materialized to another architecture later.
# (after wrapping model with DDP)
if self.rank == 0:
self.fc2 = nn.Linear(1, 10, bias=False)
else:
self.fc2 = nn.Linear(10, 10, bias=False)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
device_id = self.rank
# Ensure the test works for both find_unused_parameters and broadcast_buffers settings.
for (find_unused, broadcast_buffers) in itertools.product(
[False, True], [False, True]
):
model = TestModel(self.rank).float().to(device_id)
# Note that the model can have different shape buffers if we pass
# them in to be ignored as well.
model.fc2.register_buffer(
"ignore_buffer", torch.zeros(5 + self.rank, device=self.rank)
)
proxy_params = list(model.fc2.parameters())
proxy_buffers = list(model.fc2.buffers())
model_fc2_name = [
module_name
for module_name, module in model.named_modules()
if module is model.fc2
][0]
proxy_param_names = [
f"{model_fc2_name}.{param_name}"
for param_name, _ in model.fc2.named_parameters()
]
proxy_buffer_names = [
f"{model_fc2_name}.{buf_name}"
for buf_name, _ in model.fc2.named_buffers()
]
# Specify that we should ignore proxy_params since it will be
# materialized later.
torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
model, proxy_param_names + proxy_buffer_names
)
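# This records the fully qualified names on the module so that the DDP wrapper
# constructed below excludes them from module state broadcast and gradient
# reduction.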
ddp = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[device_id],
find_unused_parameters=find_unused,
broadcast_buffers=broadcast_buffers,
static_graph=static_graph,
)
# Materialize new params. These are not registered in DDP and thus
# don't have autograd hooks installed on them.
ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id)
# Rebuild replicated_module to pick up the changes.
ddp._build_replicated_tensor_module()
# local model with the new materialized parameters.
local_model = copy.deepcopy(ddp.module).cuda(self.rank)
inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1)
for i in range(6):
ddp(inp).sum().backward()
local_model(inp).sum().backward()
# Materialized params are not touched by DDP, so their grads should
# be the same as if running locally.
for materialized_param, local_param in zip(
ddp.module.fc2.parameters(), local_model.fc2.parameters()
):
self.assertEqual(materialized_param.grad, local_param.grad)
# fc1 parameter grad should still be different, due to allreduce.
for synced_param, local_param in zip(
ddp.module.fc1.parameters(), local_model.fc1.parameters()
):
self.assertFalse(synced_param.grad == local_param.grad)
# Proxy module grad should not be touched
for proxy_param in proxy_params:
self.assertTrue(proxy_param.grad is None)
# Synchronize since we run multiple iterations of this test, to
# isolate failure hangs.
torch.cuda.synchronize(device=self.rank)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_ignore_params_arg(self):
self._test_ddp_ignore_params_arg(static_graph=False)
self._test_ddp_ignore_params_arg(static_graph=True)
@with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_unused_params_rebuild_buckets_exception(self):
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = nn.Linear(10, 10, bias=False)
self.net2 = nn.Linear(10, 10, bias=False)
def forward(self, x):
return self.net1(x)
ddp = torch.nn.parallel.DistributedDataParallel(
ToyModel().cuda(self.rank), device_ids=[self.rank]
)
for i in range(2):
inp = torch.rand(1, 10)
if i > 0:
# On 2nd iteration, this will fail during rebuild_buckets,
# but we should report an error regarding unused parameters
# since that is the underlying root cause.
try:
ddp(inp).sum().backward()
except RuntimeError as e:
msg = str(e)
verify_ddp_error_logged(ddp, msg)
expected_strs = [
ddp_prev_reduction_unfinished_str,
ddp_recommend_find_unused_params_str,
ddp_outputs_not_used_in_loss_str,
]
# In debug mode, should show parameters that weren't reduced.
# Without debug mode, should show suggestion to use debug mode.
if dist.get_debug_level() == dist.DebugLevel.OFF:
expected_strs.append(ddp_suggest_debug_mode_str)
else:
unreduced_params = ", ".join(["net2.weight"])
expected_strs.append(
f"did not receive grad for rank {self.rank}: {unreduced_params}"
)
for s in expected_strs:
self.assertTrue(s in msg, f"Expected {s} to be in {msg}")
self.assertFalse(ddp_find_unused_params_enabled_str in msg)
else:
self.assertFalse(
True, "DDP unused parameters error not raised."
)
else:
ddp(inp).sum().backward()
dist.barrier()
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_shared_grad_acc_unused_params(self):
# When find_unused_parameters=True, ensure we mark unused parameters
# even if they share gradient accumulators.
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
# net1, bias, and net1.bias are all unused params.
self.net1 = nn.Linear(10, 5, bias=False)
self.bias = nn.Parameter(torch.zeros(5))
# net1.bias and self.bias are names for the same underlying
# parameter, so they share the same grad acc. This caused
# the bug reported in https://github.com/pytorch/pytorch/issues/41324.
self.net1.bias = self.bias
self.net2 = nn.Linear(10, 5)
def forward(self, x):
return self.net2(x).sum()
torch.cuda.set_device(self.rank)
model = ToyModel().to(torch.cuda.current_device())
for static in [True, False]:
ddp_model = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(model),
device_ids=[self.rank],
find_unused_parameters=True,
static_graph=static,
)
inp = torch.randn(20, 10, device=self.rank)
for i in range(6):
loss = ddp_model(inp)
# To test https://github.com/pytorch/pytorch/issues/61982
loss /= 10
loss.backward()
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_device(self):
m = nn.Linear(10, 10).to(self.rank)
expected_len = 2
class TensorWrapper:
__slots__ = ["t", "moved_to_gpu"]
def __init__(self, t):
self.t = t
self.moved_to_gpu = False
# Handlers for specific types of validation we want to do based on
# the input type.
def tuple_and_list_validator(x):
self.assertEqual(len(x), expected_len)
self.assertEqual(1, len(set(t.device for t in x)))
self.assertEqual(x[0].device.index, self.rank)
return x[0] + x[1]
def namedtuple_validator(x):
self.assertEqual(x._fields, EXPECTED_FIELDS)
self.assertEqual(x.a.device.index, x.b.device.index)
self.assertEqual(x.a.device.index, self.rank)
return x.a + x.b
def custom_type_validator(x):
self.assertTrue(x.moved_to_gpu or (str(x.t.device) == "cpu"))
x.t = x.t.to(self.rank)
x.moved_to_gpu = True
return x.t
def dict_validator(x):
self.assertTrue(EXPECTED_FIELDS[0] in x.keys())
self.assertTrue(EXPECTED_FIELDS[1] in x.keys())
self.assertEqual(1, len(set(t.device for t in x.values())))
self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank)
return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]]
validators = {
TensorWrapper: custom_type_validator,
tuple: tuple_and_list_validator,
list: tuple_and_list_validator,
TestNamedTupleInput_0: namedtuple_validator,
TestNamedTupleInput_1: namedtuple_validator,
dict: dict_validator,
}
class ToyModel(torch.nn.Module):
def __init__(_self): # noqa: B902
super().__init__()
_self.lin = nn.Linear(10, 10, bias=False)
def forward(_self, x, expected_type): # noqa: B902
# Similar to scatter, the recursive .to() call in the single-device
# case does not move tensors if they are wrapped in a custom type.
self.assertTrue(isinstance(x, expected_type))
fwd_tensor = validators[expected_type](x)
return _self.lin(fwd_tensor)
model = torch.nn.parallel.DistributedDataParallel(
ToyModel().to(self.rank), device_ids=[self.rank]
)
def train_iter(inp, input_type):
for _ in range(4):
out = model(inp, input_type)
out.sum().backward()
# CPU tuple input, should be moved to the proper device before call
# to forward.
inp = tuple(torch.randn(10, 10) for _ in range(expected_len))
train_iter(inp, tuple)
# List CPU input, should be moved to proper device before call to
# forward.
inp = [torch.randn(10, 10) for _ in range(expected_len)]
train_iter(inp, list)
# Custom type containing tensor. The type is maintained, but the
# device is not propagated (which is what happens with scatter too)
inp = TensorWrapper(torch.randn(10, 10))
train_iter(inp, TensorWrapper)
# NamedTuple input. The type should be maintained and tensor inputs
# should be moved to the correct device as in scatter.
batch = 5
dim = 10
a = torch.rand(batch, dim)
b = torch.rand(batch, dim)
inp = TestNamedTupleInput_0(a, b)
train_iter(inp, type(inp))
inp = TestNamedTupleInput_1(a, b)
train_iter(inp, type(inp))
# dictionary input.
inp = {
EXPECTED_FIELDS[0]: a,
EXPECTED_FIELDS[1]: b,
}
train_iter(inp, type(inp))
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_namedtuple(self):
batch = 5
dim = 10
a = torch.rand(batch, dim, device=self.rank)
b = torch.rand(batch, dim, device=self.rank)
class NamedTupleModule(torch.nn.Module):
def __init__(_self): # noqa: B902
super().__init__()
_self.lin = nn.Linear(10, 1)
def forward(_self, input, expected_type): # noqa: B902
# Without NamedTuple support, this would be of type tuple.
self.assertTrue(
isinstance(input, expected_type),
f"Expected type {expected_type} but got {type(input)}",
)
self.assertEqual(input._fields, EXPECTED_FIELDS)
self.assertEqual(a, input.a)
self.assertEqual(b, input.b)
return _self.lin(torch.mul(input.a, input.b))
model = torch.nn.parallel.DistributedDataParallel(
NamedTupleModule().cuda(self.rank), device_ids=[self.rank]
)
inp = TestNamedTupleInput_0(a, b)
# The following would fail if DDP does not propagate NamedTuples correctly.
model(inp, type(inp))
inp = TestNamedTupleInput_1(a, b)
model(inp, type(inp))
@with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_control_flow_same_across_ranks(self):
# Control flow that is the same across ranks.
batch = 20
dim = 10
world_size = dist.get_world_size()
torch.cuda.set_device(self.rank)
model = torch.nn.parallel.DistributedDataParallel(
ControlFlowToyModel().cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=True,
)
random_input = torch.randn(batch, dim, device=self.rank)
ones_input = torch.ones(batch, dim, device=self.rank)
for i in range(6):
if i % 2 == 0:
out = model(random_input)
else:
out = model(ones_input)
loss = out.sum()
loss.backward()
# On even iterations, 2nd param goes unused, on odd iterations,
# it is used.
local_used_map = model.reducer._get_local_used_map()
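# Each entry of local_used_map corresponds to a parameter and, after backward,
# holds how many ranks used that parameter this iteration (with
# find_unused_parameters=True the map is accumulated across ranks).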
if i % 2 == 0:
expected = torch.tensor(
[world_size, 0], device=self.rank, dtype=torch.int32
)
else:
expected = torch.tensor(
[world_size, world_size], device=self.rank, dtype=torch.int32
)
# Validate parameter usage.
variable_usage_tensor = local_used_map
self.assertEqual(variable_usage_tensor, expected)
# Validate appropriate error message when DDP is used with
# find_unused_parameters=False.
model = torch.nn.parallel.DistributedDataParallel(
ControlFlowToyModel().cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=False,
)
for i in range(2):
if i == 0:
loss = model(random_input).sum()
loss.backward()
else:
try:
loss = model(random_input).sum()
loss.backward()
except RuntimeError as e:
msg = str(e)
verify_ddp_error_logged(model, msg)
# 2nd linear layer is unused
unused_param_index = 1
expected_strs = [
ddp_prev_reduction_unfinished_str,
ddp_recommend_find_unused_params_str,
ddp_outputs_not_used_in_loss_str,
f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}",
]
# In debug mode, should show parameters that weren't reduced.
# Without debug mode, should show suggestion to use debug mode.
if dist.get_debug_level() == dist.DebugLevel.OFF:
expected_strs.append(ddp_suggest_debug_mode_str)
else:
unreduced_params = ", ".join(["lin2.weight"])
expected_strs.append(
f"did not receive grad for rank {self.rank}: {unreduced_params}"
)
for s in expected_strs:
self.assertTrue(s in msg, f"Expected {s} to be in {msg}")
self.assertFalse(ddp_find_unused_params_enabled_str in msg)
else:
self.assertFalse(True, "DDP error not raised")
dist.barrier()
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_invalid_static_graph(self):
world_size = dist.get_world_size()
torch.cuda.set_device(self.rank)
model = torch.nn.parallel.DistributedDataParallel(
ControlFlowToyModel().cuda(self.rank),
device_ids=[self.rank],
static_graph=True,
)
random_input = torch.randn(20, 10, device=self.rank)
ones_input = torch.ones(20, 10, device=self.rank)
# A parameter that is unused in the first iteration gets used
# in the second iteration.
expected_err = "Your training graph has changed in this iteration"
with self.assertRaisesRegex(RuntimeError, expected_err):
for i in range(2):
if i % 2 == 0:
out = model(random_input)
else:
out = model(ones_input)
loss = out.sum()
loss.backward()
verify_ddp_error_logged(model, expected_err)
# A parameter that is used in the first iteration becomes unused
# in the second iteration.
with self.assertRaisesRegex(
RuntimeError,
"Expected to have finished reduction in the prior iteration "
"before starting a new one. This error indicates that your "
"training graph has changed in this iteration, "
"e.g., one parameter is used in first iteration, "
"but then got unused in the second iteration. "
"this is not compatible with static_graph set to True.\n"
"Parameter indices which did not receive grad for"
):
for i in range(2):
if i % 2 != 0:
out = model(random_input)
else:
out = model(ones_input)
loss = out.sum()
loss.backward()
verify_ddp_error_logged(model, "Expected to have finished reduction")
@with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_control_flow_different_across_ranks(self):
# Control flow that is different across ranks.
batch = 20
dim = 10
class ToyModel(nn.Module):
def __init__(self, rank):
super(ToyModel, self).__init__()
self.lin1 = nn.Linear(10, 10, bias=False)
self.lin2 = nn.Linear(10, 10, bias=False)
self.rank = rank
def forward(self, x):
# Control-flow that is rank and input dependent for the
# model.
use_second_layer = (
torch.equal(x, torch.ones(batch, dim, device=x.device))
and self.rank == 1
)
if use_second_layer:
return self.lin2(F.relu(self.lin1(x)))
else:
return F.relu(self.lin1(x))
world_size = dist.get_world_size()
torch.cuda.set_device(self.rank)
model = torch.nn.parallel.DistributedDataParallel(
ToyModel(self.rank).cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=True,
)
random_input = torch.randn(batch, dim, device=self.rank)
ones_input = torch.ones(batch, dim, device=self.rank)
for i in range(6):
if i % 2 == 0:
out = model(random_input)
else:
out = model(ones_input)
loss = out.sum()
loss.backward()
# On even iterations, 2nd param goes unused, on odd iterations,
# it is used only on rank 1.
local_used_map = model.reducer._get_local_used_map()
if i % 2 == 0:
expected = torch.tensor(
[world_size, 0], device=self.rank, dtype=torch.int32
)
else:
expected = torch.tensor(
[world_size, 1], device=self.rank, dtype=torch.int32
)
variable_usage_tensor = local_used_map
# Validate parameter usage. On odd iterations, 2nd param is only
# used on rank 1.
self.assertEqual(variable_usage_tensor, expected)
# Validate appropriate error message when DDP is used with
# find_unused_parameters=False.
model = torch.nn.parallel.DistributedDataParallel(
ToyModel(self.rank).cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=False,
)
for i in range(2):
if i == 0:
loss = model(random_input).sum()
loss.backward()
else:
try:
loss = model(random_input).sum()
loss.backward()
except RuntimeError as e:
msg = str(e)
verify_ddp_error_logged(model, msg)
unused_param_index = 1
expected_strs = [
ddp_prev_reduction_unfinished_str,
ddp_recommend_find_unused_params_str,
ddp_outputs_not_used_in_loss_str,
f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}",
]
# In debug mode, should show parameters that weren't reduced.
# Without debug mode, should show suggestion to use debug mode.
if dist.get_debug_level() == dist.DebugLevel.OFF:
expected_strs.append(ddp_suggest_debug_mode_str)
else:
unreduced_params = ", ".join(["lin2.weight"])
expected_strs.append(
f"did not receive grad for rank {self.rank}: {unreduced_params}"
)
for s in expected_strs:
self.assertTrue(s in msg, f"Expected {s} to be in {msg}")
self.assertFalse(ddp_find_unused_params_enabled_str in msg)
else:
self.assertFalse(True, "DDP error not raised")
dist.barrier()
@require_backend({"gloo"})
def test_scatter_object_list(self):
src_rank = 0
scatter_list = (
COLLECTIVES_OBJECT_TEST_LIST
if self.rank == src_rank
else [None for _ in COLLECTIVES_OBJECT_TEST_LIST]
)
world_size = dist.get_world_size()
scatter_list = scatter_list[:world_size]
i = 0
while len(scatter_list) < world_size:
scatter_list.append(scatter_list[i])
i += 1
output_obj_list = [None]
dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank)
self.assertEqual(
output_obj_list[0],
COLLECTIVES_OBJECT_TEST_LIST[
self.rank % len(COLLECTIVES_OBJECT_TEST_LIST)
],
)
# Ensure errors are raised upon incorrect arguments.
with self.assertRaisesRegex(
RuntimeError,
"Expected argument scatter_object_output_list to be a list of size at least 1.",
):
dist.scatter_object_list([], scatter_list, src=src_rank)
def _generate_sparse_tensors_for_bucket_assignment_test(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
tensors_sparse = [t.to_sparse() for t in tensors]
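# Sparse layouts are rejected by _compute_bucket_assignment_by_size; callers
# use these tensors to assert that the "No support for sparse tensors." error
# is raised.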
return tensors_sparse
def _test_compute_bucket_assignment_by_size(self, use_logger):
group_gloo = dist.new_group(
timeout=timedelta(seconds=60), backend=dist.Backend.GLOO
)
# Set NCCL_BLOCKING_WAIT and use a new NCCL group to improve test
# determinism.
os.environ["NCCL_BLOCKING_WAIT"] = "1"
group_to_use = dist.new_group(
backend=dist.get_backend(), timeout=timedelta(seconds=5)
)
torch.cuda.set_device(self.rank)
# Create a valid model. The constructor initializes the logger that we use later.
# We never actually use the rest of the model - we only need its logger.
net = EmbeddingNetDifferentParams(0)
net = torch.nn.parallel.DistributedDataParallel(
net.to(self.rank),
device_ids=[self.rank],
process_group=group_to_use,
)
# if we don't pass a logger then we can only check that an exception was thrown.
expected_err = "No support for sparse tensors."
with self.assertRaisesRegex(RuntimeError, expected_err):
tensors_sparse = self._generate_sparse_tensors_for_bucket_assignment_test()
if use_logger:
result = dist._compute_bucket_assignment_by_size(
tensors_sparse,
[400],
logger=net.logger)
else:
result = dist._compute_bucket_assignment_by_size(tensors_sparse, [400])
if use_logger:
verify_ddp_error_logged(net, expected_err)
# Perform gloo-based barrier to ensure one rank doesn't exit test
# early which causes failure with Barrier.sync.
dist.barrier(group_gloo)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_compute_bucket_assignment_by_size_sparse_error_without_logger(self):
self._test_compute_bucket_assignment_by_size(use_logger=False)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_compute_bucket_assignment_by_size_sparse_error_with_logger(self):
self._test_compute_bucket_assignment_by_size(use_logger=True)
def _determine_expected_error_verify_model_across_rank(
self,
group_to_use,
diff_num_params=False
):
# When running with NCCL backend, we don't expect an error on rank 0,
# rather, it will be taken down by NCCL_ASYNC_ERROR_HANDLING. When
# running with Gloo or with debug mode wrapper, we expect the error
# to be caught inline.
# All ranks report the same error when there is a mismatch in the number of
# parameters, since we use allgather in the implementation.
if diff_num_params:
expected_err = "DDP expects same model across all ranks"
ctx = self.assertRaisesRegex(RuntimeError, expected_err)
return ctx, expected_err
is_detail_dbg_mode = (
dist.get_debug_level() == dist.DebugLevel.DETAIL
)
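# In DETAIL debug mode the gloo-based wrapper process group detects the
# mismatch and raises inline on rank 0 as well, so rank 0 then expects a
# generic RuntimeError (expected_err is None) rather than an NCCL collective
# timeout.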
if self.rank == 0:
if dist.get_backend(group_to_use) == dist.Backend.NCCL and not is_detail_dbg_mode:
expected_err = "Caught collective operation timeout"
ctx = self.assertRaisesRegex(RuntimeError, expected_err)
else:
expected_err = None
ctx = self.assertRaises(RuntimeError)
else:
expected_err = "appears not to match"
ctx = self.assertRaisesRegex(RuntimeError, expected_err)
return ctx, expected_err
def _test_verify_model_across_rank(self, use_logger):
group_gloo = dist.new_group(
timeout=timedelta(seconds=60), backend=dist.Backend.GLOO
)
# Set NCCL_BLOCKING_WAIT and use a new NCCL group to improve test
# determinism.
os.environ["NCCL_BLOCKING_WAIT"] = "1"
group_to_use = dist.new_group(
backend=dist.get_backend(), timeout=timedelta(seconds=5)
)
torch.cuda.set_device(self.rank)
ctx, expected_err = self._determine_expected_error_verify_model_across_rank(group_to_use)
# Create a valid model. The constructor initializes the logger that we use later.
net = EmbeddingNetDifferentParams(0)
net = torch.nn.parallel.DistributedDataParallel(
net.to(self.rank),
device_ids=[self.rank],
process_group=group_to_use,
)
# Modify the model so that the parameter shapes are different for each rank.
# This will cause a RuntimeError to be thrown below in _verify_param_shape_across_processes,
# so we can check if the correct error is thrown and is logged.
# We can't do this in the constructor above otherwise the logger will
# not be properly initialized.
net.module.lin = nn.Linear(100 if self.rank == 0 else 10, 1)
# if we pass a logger we can verify that it was logged
with ctx:
if use_logger:
_verify_param_shape_across_processes(
net.process_group,
list(net.parameters()),
net.logger
)
else:
_verify_param_shape_across_processes(
net.process_group,
list(net.parameters())
)
# Should only be run by rank 0, and blocking_wait catches and
# reports exception.
dist.barrier(group_to_use)
# We don't check on rank 0 because the logger doesn't log the error
# "Caught collective operation timeout", as that is not thrown in the reducer.
if use_logger and self.rank != 0:
verify_ddp_error_logged(net, expected_err)
# Perform gloo-based barrier to ensure one rank doesn't exit test
# early which causes failure with Barrier.sync.
dist.barrier(group_gloo)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_verify_model_across_rank_with_logger(self):
self._test_verify_model_across_rank(use_logger=True)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_verify_model_across_rank_without_logger(self):
self._test_verify_model_across_rank(use_logger=False)
def _run_test_ddp_model_with_diff_params(self, ctx, net, ddp_group, group_gloo):
with ctx:
net = torch.nn.parallel.DistributedDataParallel(
net.to(self.rank),
device_ids=[self.rank],
process_group=ddp_group
)
# Should only be run by rank 0, and blocking_wait catches and
# reports exception.
dist.barrier(ddp_group)
# can't use verify_ddp_error_logged here because net was never properly constructed
# Perform gloo-based barrier to ensure one rank doesn't exit test
# early which causes failure with Barrier.sync.
dist.barrier(group_gloo)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_model_diff_shape_across_ranks(self):
group_gloo = dist.new_group(
timeout=timedelta(seconds=60), backend=dist.Backend.GLOO
)
# Set NCCL_BLOCKING_WAIT and use a new NCCL group to improve test
# determinism.
os.environ["NCCL_BLOCKING_WAIT"] = "1"
group_to_use = dist.new_group(
backend=dist.get_backend(), timeout=timedelta(seconds=10)
)
torch.cuda.set_device(self.rank)
ctx, expected_err = self._determine_expected_error_verify_model_across_rank(group_to_use)
# Creates network with different sized embedding table on different
# ranks. This should throw an error during DDP init.
net = EmbeddingNetDifferentParams(self.rank)
self._run_test_ddp_model_with_diff_params(
ctx, net, group_to_use, group_gloo
)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_model_diff_num_params_across_ranks(self):
group_gloo = dist.new_group(
timeout=timedelta(seconds=60), backend=dist.Backend.GLOO
)
# Set NCCL_BLOCKING_WAIT and use a new NCCL group to improve test
# determinism.
os.environ["NCCL_BLOCKING_WAIT"] = "1"
group_to_use = dist.new_group(
backend=dist.get_backend(), timeout=timedelta(seconds=10)
)
torch.cuda.set_device(self.rank)
ctx, expected_err = self._determine_expected_error_verify_model_across_rank(
group_to_use, diff_num_params=True
)
# Creates network with diff # of param across ranks, reducer should
# recognize this and throw appropriate error.
net = EmbeddingNetDifferentParams(self.rank, diff_num_params=(self.rank == 1))
self._run_test_ddp_model_with_diff_params(
ctx, net, group_to_use, group_gloo,
)
def _test_output_unused_in_loss(self, module_cls, gradient_as_bucket_view):
model = module_cls()
local_net = copy.deepcopy(model)
net = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(model).cuda(self.rank),
device_ids=[self.rank],
find_unused_parameters=True,
)
# Tests that it is supported for certain parameters to receive no gradient
# because the corresponding output is unused in loss computation. Specifically,
# checks that the grads remain unchanged and are the same as in local
# training.
inp = torch.randn(10, 10)
# Ensure that if a param is not used in loss computation, its
# gradient is untouched, i.e. if it is None before it is None after,
# not zero.
if module_cls == DictOutputModule:
a, b = local_net(inp)["predictions"]
a_dist, b_dist = net(inp)["predictions"]
else:
a, b = local_net(inp)
a_dist, b_dist = net(inp)
loss_dist = b_dist.sum()
loss_dist.backward()
# Ensure that gradient corresponding to parameter "a" was not
# touched, i.e. it is None and matches the local grad.
if module_cls == DictOutputModule:
self.assertTrue(net.module.module.a.weight.grad is None)
self.assertEqual(
net.module.module.a.weight.grad, local_net.module.a.weight.grad
)
else:
self.assertTrue(net.module.a.weight.grad is None)
self.assertEqual(net.module.a.weight.grad, local_net.a.weight.grad)
saved_a_local_grad = None
saved_a_dist_grad = None
net.zero_grad()
local_net.zero_grad()
for i in range(6):
if module_cls == DictOutputModule:
a, b = local_net(inp)["predictions"]
a_dist, b_dist = net(inp)["predictions"]
else:
a, b = local_net(inp)
a_dist, b_dist = net(inp)
if i < 2:
# Use both params in loss computation. Later, "a" will go
# unused and we check to ensure DDP supports this and
# gradients remain the same as local training.
t = a @ b
t_dist = a_dist @ b_dist
loss = t.sum()
loss_dist = t_dist.sum()
else:
# Model output "a" unused in loss.
loss = b.sum()
loss_dist = b_dist.sum()
loss.backward()
loss_dist.backward()
if i == 1:
# Save grads to compare with them in next iterations.
if module_cls == DictOutputModule:
saved_a_local_grad = local_net.module.a.weight.grad
saved_a_dist_grad = net.module.module.a.weight.grad
else:
saved_a_local_grad = local_net.a.weight.grad
saved_a_dist_grad = net.module.a.weight.grad
self.assertEqual(saved_a_local_grad, saved_a_dist_grad)
elif i >= 2:
# parameter "a" of both models should be the same and not change
if module_cls == DictOutputModule:
self.assertEqual(net.module.module.a.weight.grad, saved_a_dist_grad)
self.assertEqual(local_net.module.a.weight.grad, saved_a_local_grad)
else:
self.assertEqual(net.module.a.weight.grad, saved_a_dist_grad)
self.assertEqual(local_net.a.weight.grad, saved_a_local_grad)
# Verify grads are the same
for (local_param, dist_param) in zip(
local_net.parameters(), net.parameters()
):
local_grad = local_param.grad
dist_grad = dist_param.grad
self.assertEqual(local_grad, dist_grad)
dist.barrier()
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_lt_x_gpu(2)
def test_output_unused_in_loss_tuple_module(self):
module_cls = UnusedParamTwoLinLayerNet
for grad_as_bucket_view in [True, False]:
self._test_output_unused_in_loss(module_cls, grad_as_bucket_view)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_lt_x_gpu(2)
def test_output_unused_in_loss_dict_module(self):
module_cls = DictOutputModule
for grad_as_bucket_view in [True, False]:
self._test_output_unused_in_loss(module_cls, grad_as_bucket_view)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_lt_x_gpu(2)
def test_undefined_grad_parity_unused_parameters(self):
# TODO: enable this for general training use cases:
# https://github.com/pytorch/pytorch/issues/58511.
x = torch.ones(1, 2).to(self.rank)
net = Net().to(self.rank)
local_net = copy.deepcopy(net)
net = torch.nn.parallel.DistributedDataParallel(
net,
device_ids=[self.rank],
find_unused_parameters=True,
)
out = net(x).sum()
local_out = local_net(x).sum()
# Simulates undefined gradients.
torch._C._functions.UndefinedGrad()(out).backward()
torch._C._functions.UndefinedGrad()(local_out).backward()
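# UndefinedGrad forwards its input but produces undefined gradients during
# backward, so parameters may end up with no grad at all; DDP must handle this
# the same way local autograd does, which the per-parameter comparison below
# verifies.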
for (dist_param_name, dist_param), (local_param_name, local_param) in zip(
net.named_parameters(), local_net.named_parameters()
):
dist_grad = dist_param.grad
local_grad = local_param.grad
self.assertEqual(
dist_grad,
local_grad,
f"""DDP param {dist_param_name} with grad {dist_grad}
does not match local param {local_param_name} with grad
{local_grad}""",
)
def _test_different_graph_across_ranks(
self, find_unused_parameters=False, static_graph=False
):
class ToyModel(nn.Module):
def __init__(self, rank):
super(ToyModel, self).__init__()
self.lin1 = nn.Linear(10, 10, bias=False)
self.lin2 = nn.Linear(10, 10, bias=False)
self.rank = rank
def forward(self, x):
if self.rank == 0:
return self.lin2(F.relu(self.lin1(x)))
else:
return F.relu(self.lin1(x))
torch.manual_seed(31415)
world_size = dist.get_world_size()
torch.cuda.set_device(self.rank)
model = ToyModel(self.rank).cuda(self.rank)
ddp_model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.rank],
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=True,
static_graph=static_graph,
)
random_input = torch.randn(20, 10, device=self.rank)
for i in range(10):
out = ddp_model(random_input)
loss = out.sum()
loss.backward()
return ddp_model
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_different_graph_across_ranks(self):
base_model = self._test_different_graph_across_ranks(
find_unused_parameters=True
)
self.assertFalse(
base_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0)
)
static_model = self._test_different_graph_across_ranks(static_graph=True)
self.assertTrue(
static_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0)
)
for i, j in zip(base_model.parameters(), static_model.parameters()):
self.assertEqual(i, j)
@require_backend({"gloo"})
@require_backends_available({"gloo"})
@sandcastle_skip_if(
IS_MACOS or IS_WINDOWS,
"MacOS uses uv transport which does not have as robust error handling as tcp transport",
)
def test_monitored_barrier_gloo(self):
tensors = [torch.ones(10) * self.rank]
# Kick off some allreduce work on all ranks
for _ in range(10):
dist.all_reduce(torch.cat(tensors))
# Run monitored barrier and ensure it passes
timeout = timedelta(seconds=2)
dist.monitored_barrier(timeout=timeout)
# Check monitored_barrier success with wait_all_ranks=True
for _ in range(10):
dist.all_reduce(torch.cat(tensors))
dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)
# All ranks besides 1 call into barrier, rank 0 should report failure
# while others report gloo error.
failed_rank = 1
src_rank = 0
if self.rank == src_rank:
with self.assertRaisesRegex(
RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier"
):
dist.monitored_barrier(timeout=timeout)
elif self.rank != failed_rank:
# Other ranks should not pass barrier since rank 0 failed.
err_regex = (
f"Rank {self.rank} successfully reached monitoredBarrier,"
f" but received errors while waiting for send/recv from rank"
f" {src_rank}"
)
with self.assertRaisesRegex(RuntimeError, err_regex):
dist.monitored_barrier(timeout=timeout)
# We need a barrier since otherwise failed_rank exits too early
# and causes a timeout.
self._barrier(timeout=30)
@require_backend({"gloo"})
@require_backends_available({"gloo"})
def test_monitored_barrier_gloo_subgroup(self):
# Tests that monitored_barrier works as expected on non-default
# process groups.
failed_rank = 1
timeout = 0.1
subgroup = dist.new_group(ranks=[0, 1])
if self.rank == failed_rank:
return
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier"
):
dist.monitored_barrier(subgroup, timeout)
else:
# Other ranks call into monitored_barrier, but this should be a
# noop because they are not part of the subgroup. Verify that
# there are no errors here.
dist.monitored_barrier(subgroup, timeout)
def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks):
# tests expected behavior when nonzero rank hangs.
nccl_pg = dist.new_group(
ranks=list(i for i in range(int(self.world_size))),
# provide sufficient timeout so communicators
# can be initialized in ctor.
timeout=timedelta(seconds=15),
backend=dist.Backend.NCCL,
)
gloo_pg = dist.new_group(
ranks=list(i for i in range(int(self.world_size))),
backend=dist.Backend.GLOO,
)
tensors = [torch.ones(10, device=self.rank) * self.rank]
# Let all ranks call allreduce first to set up communicators etc.
# Directly simulating error here will run into store issue described
# in https://github.com/pytorch/pytorch/issues/54524.
nccl_pg.allreduce(tensors).wait(timedelta(seconds=5))
# All ranks besides 0 call into allreduce. This is to simulate a
# desync across the world, where some ranks call into
# monitored_barrier() and others are stuck in collective comm. In
# practice, we don't need NCCL_BLOCKING_WAIT, but we use it in this
# test to ensure it exits cleanly.
if self.rank != 0:
# Can get different errors here depending on whether gloo-based
# wrapper PG is enabled or not, since with wrapper pg, it will
# fail in a collective synchronization check and not actually
# call into the nccl pg.
if dist.get_debug_level() == dist.DebugLevel.DETAIL:
err_regex = "Timed out waiting"
else:
err_regex = "Caught collective operation timeout"
with self.assertRaisesRegex(RuntimeError, err_regex):
nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1))
else:
# Rank 0 should report first (in order) timed out rank or all ranks
# depending on wait_all_ranks flag passed into monitored_barrier.
if wait_all_ranks:
rank_str = ", ".join(
[str(i) for i in range(1, int(self.world_size))]
)
err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier"
else:
expected_first_fail_rank = 1
err_regex = f"Rank {expected_first_fail_rank} failed to pass monitoredBarrier"
monitored_barrier_timeout_seconds = timedelta(seconds=0.1)
with self.assertRaisesRegex(RuntimeError, err_regex):
gloo_pg.monitored_barrier(
monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks
)
self._barrier(timeout=30)
@with_nccl_blocking_wait
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_monitored_barrier_allreduce_hang(self):
# tests expected behavior when nonzero rank hangs and we want to
# report first timed out rank.
self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False)
@with_nccl_blocking_wait
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_monitored_barrier_allreduce_hang_wait_all_ranks(self):
# tests expected behavior when nonzero rank hangs and we want to
# report all timed out ranks.
self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True)
@require_backend({"gloo"})
@require_backends_available({"gloo"})
def test_monitored_barrier_gloo_rank_0_timeout(self):
# tests error when rank 0 exhausts its given timeout.
process_group = dist.new_group(
ranks=list(i for i in range(int(self.world_size)))
)
timeout = timedelta(seconds=0)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, f"Rank {self.rank} timed out in monitoredBarrier"
):
process_group.monitored_barrier(timeout)
@require_backend({"gloo"})
@require_backends_available({"gloo"})
@skip_if_small_worldsize
@sandcastle_skip_if(
IS_MACOS or IS_WINDOWS,
"MacOS uses uv transport which does not have as robust error handling as tcp transport",
)
def test_monitored_barrier_failure_order(self):
# Ensure that the first (in sorted order) rank is reported when
# multiple ranks fail to pass the monitored_barrier.
# TODO(#54879): Provide ability to wait and report all failed ranks
expected_first_failed_rank = 2
timeout = timedelta(seconds=2)
src_rank = 0
if self.rank == src_rank:
with self.assertRaisesRegex(
RuntimeError, f"Rank {expected_first_failed_rank}"
):
dist.monitored_barrier(timeout=timeout)
elif self.rank == 1:
err_regex = (
f"Rank {self.rank} successfully reached monitoredBarrier,"
f" but received errors while waiting for send/recv from rank"
f" {src_rank}"
)
with self.assertRaisesRegex(RuntimeError, err_regex):
dist.monitored_barrier(timeout=timeout)
@require_backend({"gloo"})
@require_backends_available({"gloo"})
@skip_if_small_worldsize
def test_monitored_barrier_wait_all_ranks(self):
# Tests simple case where > 1 rank does not call into monitored
# barrier and verifies all ranks are reported by rank 0.
if self.rank == 0:
timeout = timedelta(seconds=0.1)
rank_str = ", ".join([str(i) for i in range(1, int(self.world_size))])
err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier"
with self.assertRaisesRegex(RuntimeError, err_regex):
dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@with_dist_debug_levels(levels=["INFO"])
@skip_if_lt_x_gpu(2)
def test_ddp_build_debug_param_to_name_mapping(self):
model = TwoLinLayerNet()
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank),
device_ids=[self.rank],
)
expected_mapping = {0: "a.weight", 1: "b.weight"}
net_params, _ = net._build_params_for_reducer()
param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params)
self.assertDictEqual(expected_mapping, param_to_name_mapping)
# Test when DDP is used with ignored parameters.
model = TwoLinLayerNet()
# Parameters to ignore are in the format {module_name}.{param_name}
params_to_ignore = ["a.weight"]
torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
model, params_to_ignore
)
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank),
device_ids=[self.rank],
)
expected_mapping = {0: "b.weight"}
net_params, _ = net._build_params_for_reducer()
param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params)
self.assertDictEqual(expected_mapping, param_to_name_mapping)
# Test errors are raised when DDP and module parameters mismatch.
# This generally indicates a bug with DDP and is not expected to
# happen in user applications.
model = TwoLinLayerNet()
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank),
device_ids=[self.rank],
)
net_params, _ = net._build_params_for_reducer()
if self.rank == 0:
print(type(net_params[0]))
net_params.extend(
[
torch.nn.Parameter(torch.ones(1)),
torch.nn.Parameter(torch.ones(1)),
]
)
with self.assertRaisesRegex(ValueError, "Expected param to name mapping"):
net._build_debug_param_to_name_mapping(net_params)
net_params = net_params[:-3]
with self.assertRaisesRegex(ValueError, "Param with name"):
net._build_debug_param_to_name_mapping(net_params)
net_params.extend(
[
torch.nn.Parameter(torch.ones(1)),
torch.nn.Parameter(torch.ones(1)),
]
)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@with_dist_debug_levels(levels=["INFO"])
@skip_if_lt_x_gpu(2)
def test_ddp_build_debug_param_to_name_mapping_requires_grad(self):
class Net(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(10, 10)
# The frozen bias is not tracked by DDP and should not show up in the
# param to name mapping.
self.lin.bias.requires_grad_(False)
def forward(self, x):
return self.lin(x)
model = Net()
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank), device_ids=[self.rank]
)
expected_mapping = {
0: "lin.weight",
}
net_params, _ = net._build_params_for_reducer()
param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params)
self.assertEqual(param_to_name_mapping, expected_mapping)
def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse):
debug_mode_off = dist.get_debug_level() == dist.DebugLevel.OFF
class SubModule(nn.Module):
def __init__(self):
super().__init__()
self.embedding_net = EmbeddingNetDifferentParams(0)
self.lin = TwoLinLayerNet()
self.bn = BatchNormNet()
self.lin_layer = nn.Linear(4, 10, bias=False)
def forward(self, x):
x = self.bn(x)
x = self.lin_layer(x)
x = self.lin.a(x) # self.lin.b param unused
# EmbeddingNetDifferentParams entirely unused: self.embedding_net.embedding and
# self.embedding_net.lin unused.
return x
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.sub_module = SubModule()
def forward(self, x):
return self.sub_module(x)
model = MyModel()
sparse_embedding_fqns = []
if ignore_sparse:
for module_name, module in model.named_modules():
if module == model.sub_module.embedding_net.embedding:
for parameter_name, param in module.named_parameters(
recurse=False
):
fqn = f"{module_name}.{parameter_name}"
sparse_embedding_fqns.append(fqn)
torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
model, sparse_embedding_fqns
)
unused_modules = [
model.sub_module.embedding_net.lin,
model.sub_module.lin.b,
]
else:
unused_modules = list(model.sub_module.embedding_net.modules()) + [
model.sub_module.lin.b,
]
expected_unused_param_fqns = []
used_param_fqns = [] # Validate that these don't mistakenly show up.
fqn_to_param_index = {}
index = 0
for module_name, module in model.named_modules():
for parameter_name, param in module.named_parameters(recurse=False):
fqn = f"{module_name}.{parameter_name}"
fqn_to_param_index[fqn] = index
if fqn not in sparse_embedding_fqns:
index += 1
if module in unused_modules:
expected_unused_param_fqns.append(fqn)
else:
if (
not ignore_sparse
or module != model.sub_module.embedding_net.embedding
):
used_param_fqns.append(fqn)
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank),
device_ids=[self.rank],
)
batch, dim = 10, 2
inp = torch.ones(batch, dim)
for i in range(2):
if i == 0:
out = net(inp)
loss = out.sum()
loss.backward()
else:
try:
out = net(inp)
loss = out.sum()
loss.backward()
except RuntimeError as e:
e = str(e)
unused_param_substr = e[e.find("did not receive grad") :]
# Validate that each unused param fully qualified name
# shows up in error logs. We do this instead of
# constructing a joined string since order of parameters
# can be different in Reducer. In addition, validate
# param indices show up as well.
for unused_param_fqn in expected_unused_param_fqns:
self.assertTrue(
unused_param_fqn in unused_param_substr
or debug_mode_off
)
self.assertTrue(
str(fqn_to_param_index[unused_param_fqn])
in unused_param_substr,
f"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}",
)
# Validate that used param fqns don't show up in error
# logs.
for used_param_fqn in used_param_fqns:
self.assertFalse(used_param_fqn in unused_param_substr)
# Validate that ignored param fqns don't show up as unused
# (since DDP does not track them)
for sparse_param_fqn in sparse_embedding_fqns:
self.assertFalse(sparse_param_fqn in unused_param_substr)
else:
self.assertTrue(False, "Expected error was not raised!")
@with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_multiple_nested_unused_params_error(self):
self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False)
@with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"])
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_multiple_nested_unused_params_err_ignore_params(self):
# Tests unused parameter reporting when DDP is configured to ignore
# certain parameters.
self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_lt_x_gpu(2)
def test_ddp_inference(self):
# tests that DDP module can be run on a single node with no_grad
# or eval setting and there is no hang.
rank = self.rank
torch.cuda.set_device(rank)
model = Net().cuda()
local_model = copy.deepcopy(model)
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[rank],
)
syncbn_model = nn.SyncBatchNorm(
2, momentum=0.99, track_running_stats=False
).cuda()
local_syncbn_model = copy.deepcopy(syncbn_model)
syncbn_model = torch.nn.parallel.DistributedDataParallel(
syncbn_model, device_ids=[rank]
)
inp = torch.randn(10, 2, device=rank)
inp_syncbn = torch.randn(10, 2, 4, 4, device=rank)
tests = [
(model, local_model, inp),
(syncbn_model, local_syncbn_model, inp_syncbn),
]
for test in tests:
test_model, test_local_model, test_inp = test
if self.rank == 0:
test_model.eval()
test_local_model.eval()
for _ in range(6):
self.assertEqual(
test_model(test_inp), test_local_model(test_inp)
)
# Barrier since only rank 0 runs inference. Test should be
# much faster than 30s, but this is to avoid flakiness.
self._barrier(timeout=30)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
@skip_if_lt_x_gpu(2)
def test_ddp_sync_bn_training_vs_eval(self):
rank = self.rank
torch.cuda.set_device(rank)
# Need to set track_running_stats=False; when track_running_stats=True,
# bn_training is False and sync cannot occur in eval mode.
model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda(
rank
)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
# Test sync occurs in training mode.
with torch.autograd.profiler.profile() as prof:
for i in range(6):
inp = torch.randn(10, 2, 4, 4).cuda(rank)
out = model(inp)
loss = out.sum()
loss.backward()
# SyncBN allgathers stats across all ranks, so verify call to
# all_gather in profiler.
if BACKEND == "nccl":
all_gather_calls = get_profiling_event("_all_gather_base", prof)
else:
all_gather_calls = get_profiling_event("all_gather", prof)
self.assertNotEqual([], all_gather_calls)
# Only do inference on one rank. If SyncBN did collective stats sync,
# this would hang/error.
model_inference = model.module
if self.rank == 0:
model_inference.eval()
with torch.autograd.profiler.profile() as prof:
for i in range(6):
inp = torch.randn(10, 2, 4, 4).cuda(rank)
out = model_inference(inp)
loss = out.sum()
loss.backward()
# Ensure sync does not occur in eval() mode.
if BACKEND == "nccl":
all_gather_calls = get_profiling_event("_all_gather_base", prof)
else:
all_gather_calls = get_profiling_event("all_gather", prof)
self.assertEqual([], all_gather_calls)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_python_error_logged(self):
# Most python exceptions in DDP are raised during init before
# reducer is constructed, so we don't have a logger in those cases.
# However, the below is one example where a python error is thrown
# after reducer is constructed.
model = TwoLinLayerNet().cuda(self.rank)
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.rank],
)
expected_err = "must be callable"
with self.assertRaisesRegex(TypeError, expected_err):
model.register_comm_hook({}, {})
verify_ddp_error_logged(model, expected_err)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_static_graph_nested_types(self):
# Tests for static graph training when outputs are not just tensors
# but can be (nested) tuple, list, dict, etc.
rank = self.rank
torch.cuda.set_device(rank)
class NestedOutputModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(100, 1, bias=False)
def forward(self, inp, output_type):
if output_type == "tuple":
return (
self.lin(inp),
(
self.lin(inp),
self.lin(inp),
),
)
elif output_type == "list":
return [
self.lin(inp),
[
self.lin(inp),
self.lin(inp),
],
]
elif output_type == "dict":
return {
"a": self.lin(inp),
"b": {
"c": self.lin(inp),
},
}
def get_loss(model_output):
loss = 0.0
if isinstance(model_output, torch.Tensor):
return model_output.sum()
elif isinstance(model_output, dict):
for value in model_output.values():
loss += get_loss(value)
elif isinstance(model_output, tuple) or isinstance(model_output, list):
for x in model_output:
loss += get_loss(x)
else:
raise ValueError(f"Unknown model output type {type(model_output)}")
return loss
model = NestedOutputModule().cuda(rank)
model_static_graph = copy.deepcopy(model)
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[rank],
)
model_static_graph = torch.nn.parallel.DistributedDataParallel(
model_static_graph,
device_ids=[rank],
static_graph=True,
)
inp = torch.randn(10, 100)
type_mapping = {
"list": list,
"tuple": tuple,
"dict": dict,
}
for output_type in type_mapping.keys():
for i in range(6):
out = model(inp, output_type=output_type)
loss = get_loss(out)
loss.backward()
self._model_step(model)
out_static = model_static_graph(inp, output_type=output_type)
self.assertTrue(isinstance(out_static, type_mapping[output_type]))
loss_static = get_loss(out_static)
loss_static.backward()
self._model_step(model_static_graph)
for (p, p_static) in zip(
model.parameters(), model_static_graph.parameters()
):
self.assertEqual(p, p_static)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_returns_tensor_with_no_grad(self):
# Tests case where module returns tensor that does not require grad.
torch.cuda.set_device(self.rank)
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(10, 10, bias=False)
self.fc2 = nn.Linear(10, 10, bias=False)
def forward(self, x):
x = self.fc2(F.relu(self.fc1(x)))
y = x.clone()
x = x.detach()
assert not x.requires_grad
return (x, y)
model = MyModel().to(self.rank)
inp = torch.randn(1, 10, device=self.rank)
for (find_unused, static_graph) in itertools.product([True, False], [True, False]):
ddp = DistributedDataParallel(
model,
device_ids=[self.rank],
output_device=self.rank,
find_unused_parameters=find_unused,
static_graph=static_graph,
)
for i in range(6):
out = ddp(inp)
self.assertFalse(out[0].requires_grad)
o = (out[0] + out[1]).sum()
o.backward()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_detect_ddp_is_actually_static(self):
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = nn.Linear(10, 10, bias=False)
self.net2 = nn.Linear(10, 10)
def forward(self, x, find_unused, dynamic):
if find_unused:
if dynamic:
return self.net2(self.net1(x))
else:
return self.net2(x)
else:
return self.net2(self.net1(x))
# Set of unused parameters don't change across iterations
torch.cuda.set_device(self.rank)
model = ToyModel().cuda()
for find_unused in [True, False]:
ddp = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.rank],
find_unused_parameters=find_unused,
)
inp = torch.randn(1, 10, device="cuda")
for _ in range(6):
out = ddp(inp, find_unused=find_unused, dynamic=False)
loss = out.sum()
loss.backward()
self.assertTrue(ddp.reducer._ddp_graph_static())
# Set of unused parameters dynamically change
ddp = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.rank],
find_unused_parameters=True,
)
inp = torch.randn(1, 10, device="cuda")
for i in range(6):
out = ddp(inp, find_unused=True, dynamic=i % 2 == 0)
loss = out.sum()
loss.backward()
self.assertFalse(ddp.reducer._ddp_graph_static())
def _test_ddp_new_tensor_in_fwd(self, static_graph):
# Test from https://github.com/pytorch/pytorch/issues/60733
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(10, 10, bias=False)
self.fc2 = nn.Linear(10, 10, bias=False)
self.device = self.fc1.weight.device
def __init_opt(self):
opt = torch.randn(1, 10, device=self.device)
return opt
def forward(self, x, opt_1, opt_2, opt_nested):
x = F.relu(self.fc1(x))
x = self.fc2(x)
if opt_1 is None:
opt_1 = self.__init_opt()
if opt_2 is None:
opt_2 = self.__init_opt()
if opt_nested is None or not torch.is_tensor(opt_nested):
opt_nested = self.__init_opt()
# Test multiple tensors as well as newly created tensors
# within a struct.
return x, opt_1, opt_2, {"tensor": opt_nested}
model = MyModel().to(self.rank)
for find_unused in [True, False]:
ddp = DistributedDataParallel(
model,
device_ids=[self.rank],
output_device=self.rank,
broadcast_buffers=False,
find_unused_parameters=find_unused,
static_graph=static_graph,
)
opt = [None for _ in range(3)]
for i in range(2):
ddp.zero_grad()
x = torch.randn(1, 10, device=self.rank)
out, opt[0], opt[1], opt[2] = ddp(
x, opt_1=opt[0], opt_2=opt[1], opt_nested=opt[2]
)
for i in range(len(opt)):
if torch.is_tensor(opt[i]):
self.assertEqual(opt[i].grad_fn, None)
else:
self.assertEqual(opt[i]["tensor"].grad_fn, None)
out.mean().backward()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_new_tensor_in_fwd(self):
return self._test_ddp_new_tensor_in_fwd(static_graph=False)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_new_tensor_in_fwd_static_graph(self):
return self._test_ddp_new_tensor_in_fwd(static_graph=True)
def _test_ddp_buffer_hook_allreduce(self, return_futures):
rank = self.rank
torch.cuda.set_device(rank)
torch.manual_seed(rank)
torch.cuda.manual_seed(rank)
def buffer_comm_hook(ddp, named_buffers):
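# named_buffers maps buffer name -> buffer tensor. A buffer hook may either
# return a list of futures (awaited by DDP at the end of the backward pass)
# or finish the communication synchronously and return None.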
buffers = [
buffer for (_, buffer) in named_buffers.items()
]
futs = [
dist.all_reduce(buffer, group=ddp.process_group, async_op=True).get_future()
for buffer in buffers
]
if return_futures:
return futs
else:
torch.futures.collect_all(futs).wait()
hook_pre_fwd = torch.nn.parallel.distributed._BufferCommHookLocation.PRE_FORWARD
hook_post_fwd = torch.nn.parallel.distributed._BufferCommHookLocation.POST_FORWARD
for hook_run_location in [
hook_pre_fwd,
hook_post_fwd,
]:
model = NetWithBuffers().cuda(rank)
model_ddp = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.rank],
)
model_ddp._register_buffer_comm_hook(
model_ddp,
buffer_comm_hook,
hook_run_location
)
model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(model),
device_ids=[self.rank],
broadcast_buffers=False
)
inp = torch.randn(2, 10, device=rank)
for i in range(2):
loss_hook = model_ddp(inp).sum()
# Simulate the buffer allreduce for the no-hook model at the same
# point (pre- or post-forward) as the registered hook location.
if hook_run_location == hook_pre_fwd:
model_no_hook_buffers = list(model_ddp_no_hook.module.buffers())
for tensor in model_no_hook_buffers:
dist.all_reduce(tensor)
loss_no_hook = model_ddp_no_hook(inp).sum()
if hook_run_location == hook_post_fwd:
model_no_hook_buffers = list(model_ddp_no_hook.module.buffers())
for tensor in model_no_hook_buffers:
dist.all_reduce(tensor)
torch.cuda.synchronize()
# if return_futures, they are only awaited on by DDP
# at the end of the backwards pass for maximum overlap.
if not return_futures:
self._verify_buffers_equal(model_ddp, model_ddp_no_hook)
loss_hook.backward()
loss_no_hook.backward()
# Note that when custom hooks return futures, this
# comparison is not expected to work when hook run location
# is pre-forward pass. This is because the hook does async
# communication and forward pass modifies the buffer without
# appropriate synchronization. Therefore, if returning
# futures from custom buffer hooks, it is advised to set
# hook run location to post forward.
if return_futures and hook_run_location == hook_post_fwd:
self._verify_buffers_equal(model_ddp, model_ddp_no_hook)
dist.barrier()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_buffer_hook_allreduce_return_future(self):
self._test_ddp_buffer_hook_allreduce(
return_futures=True
)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_buffer_hook_allreduce(self):
self._test_ddp_buffer_hook_allreduce(
return_futures=False
)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_broadcast_buffer_via_hook(self):
# test that _distributed_broadcast_coalesced via registered hook is
# equivalent to DDP's default broadcast coalesced.
rank = self.rank
torch.cuda.set_device(rank)
torch.manual_seed(rank)
torch.cuda.manual_seed(rank)
def buffer_comm_hook(ddp, named_buffers):
# named_buffers is a Dict[str, Tensor] representing a mapping
# from buffer name to buffer.
buffers = [
buffer for (_, buffer) in named_buffers.items()
]
ddp._default_broadcast_coalesced(buffers)
model = NetWithBuffers().cuda(rank)
model_ddp = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.rank],
)
model_ddp._register_buffer_comm_hook(
model_ddp,
buffer_comm_hook
)
model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(model),
device_ids=[self.rank],
)
inp = torch.randn(2, 10, device=rank)
for i in range(2):
loss_hook = model_ddp(inp).sum()
loss_no_hook = model_ddp_no_hook(inp).sum()
self._verify_buffers_equal(model_ddp, model_ddp_no_hook)
loss_hook.backward()
loss_no_hook.backward()
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_ddp_broadcast_buffer(self):
rank = self.rank
torch.cuda.set_device(rank)
torch.manual_seed(rank)
torch.cuda.manual_seed(rank)
class NetWithBuffers(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Linear(10, 10, bias=False)
self.b = nn.Linear(10, 1, bias=False)
self.register_buffer('buffer', torch.randn(1, 2))
def forward(self, x):
return self.b(self.a(x))
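# With the default broadcast_buffers=True, DDP broadcasts module buffers
# from rank 0 during forward, so the rank-0 mutation below should be
# reflected on all ranks after each iteration.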
model = NetWithBuffers().cuda(rank)
model_ddp = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.rank],
)
inp = torch.randn(2, 10, device=rank)
for i in range(2):
if rank == 0:
model_ddp.module.buffer = model_ddp.module.buffer + 1
loss = model_ddp(inp).sum()
loss.backward()
# Ensure all buffers are synchronized.
bufs = [torch.empty_like(model_ddp.module.buffer) for _ in range(dist.get_world_size())]
dist.all_gather(bufs, model_ddp.module.buffer)
rank_0_buf = bufs[0]
for buf in bufs[1:]:
self.assertEqual(rank_0_buf, buf)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND != "nccl" and BACKEND != "gloo",
"Only Nccl & Gloo backend support DistributedDataParallel",
)
def test_sync_bn_logged(self):
model = BN_NET
rank = self.rank
# single gpu training setup
model_gpu = model.cuda(rank)
no_sync_bn = torch.nn.parallel.DistributedDataParallel(
copy.deepcopy(model_gpu),
device_ids=[self.rank],
)
ddp_logging_data = no_sync_bn._get_ddp_logging_data()
sync_bn_logged = ddp_logging_data.get("has_sync_bn", True)
self.assertFalse(sync_bn_logged)
model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(model_gpu)
model_DDP = torch.nn.parallel.DistributedDataParallel(
model_DDP,
device_ids=[self.rank],
)
ddp_logging_data = model_DDP._get_ddp_logging_data()
sync_bn_logged = ddp_logging_data.get("has_sync_bn", False)
self.assertTrue(sync_bn_logged)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["ddp"],
f"The {BACKEND} backend does not support DistributedDataParallel"
)
def test_stateless_api_with_ddp(self):
class MockModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(1, 1)
buffer = torch.ones(1)
self.register_buffer('buffer', buffer)
def forward(self, x):
return self.l1(x) + self.buffer
device = self.rank
module = MockModule().to(device)
# Disable DDP + ReplicatedTensor since stateless looks for 'module'
# whereas with ReplicatedTensor, we run '_replicated_tensor_module'
# in the forward pass.
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
with _ddp_replicated_tensor(False):
module = torch.nn.parallel.DistributedDataParallel(
module,
device_ids=[device]
)
x = torch.rand((1, 1)).to(device)
weight = torch.tensor([[1.0]], device=device, requires_grad=True)
bias = torch.tensor([0.0], device=device, requires_grad=True)
buffer = torch.tensor([0.0], device=device)
parameters = {'module.l1.weight': weight,
'module.l1.bias': bias,
'module.buffer': buffer}
prev_weight = module.module.l1.weight.clone()
prev_buffer = module.module.buffer.clone()
res = _stateless.functional_call(module, parameters, x)
self.assertEqual(x, res)
# check that the weight remain unmodified
cur_weight = module.module.l1.weight
cur_buffer = module.module.buffer
self.assertEqual(cur_weight, prev_weight)
self.assertEqual(cur_buffer, prev_buffer)
# run a backward pass and check the gradients
res.backward()
self.assertIsNotNone(weight.grad)
self.assertIsNotNone(bias.grad)
# Gradients were not computed for the module's own parameters and buffers
self.assertIsNone(buffer.grad)
self.assertIsNone(module.module.l1.weight.grad)
self.assertIsNone(module.module.l1.bias.grad)
self.assertIsNone(module.module.buffer.grad)
@require_backend(DistTestCases.backend_feature["gpu"])
@require_backends_available(DistTestCases.backend_feature["gpu"])
@skip_if_lt_x_gpu(2)
def test_ddp_forward_backward_hook(self):
class DummyTestModel(nn.Module):
def __init__(self):
super(DummyTestModel, self).__init__()
torch.manual_seed(0)
self.fc = nn.Linear(2, 2)
def forward(self, x):
return self.fc(x)
def relu_hook(module, input):
return nn.functional.relu(input[0])
def gelu_hook(module, _input, output):
return nn.functional.gelu(output)
def celu_hook(module, _input, output):
return (nn.functional.celu(output[0]),)
local_model = DummyTestModel()
ddp_model = DummyTestModel()
local_model.fc.register_forward_pre_hook(relu_hook)
local_model.fc.register_forward_hook(gelu_hook)
ddp_model.fc.register_forward_pre_hook(relu_hook)
ddp_model.fc.register_forward_hook(gelu_hook)
local_model.fc.register_backward_hook(celu_hook)
ddp_model.fc.register_backward_hook(celu_hook)
ddp_model = DistributedDataParallel(
ddp_model.to(self.rank), device_ids=[self.rank]
)
input_data = torch.rand(5, 2)
output_local = local_model(input_data)
output_ddp = ddp_model(input_data.to(self.rank))
self.assertEqual(output_local, output_ddp)
output_local.sum().backward()
output_ddp.sum().backward()
ddp_grads = [p.grad for p in ddp_model.parameters()]
self.assertEqual(ddp_grads[0], local_model.fc.weight.grad)
self.assertEqual(ddp_grads[1], local_model.fc.bias.grad)
def _test_hook_pickling(self, hook, hook_state):
torch.manual_seed(0)
learning_rate = 0.01
chkpt_file = tempfile.gettempdir() + "/checkpoint.pt"
rank = self.rank
input = torch.randn(7, 1, device=rank)
target = torch.randn(7, 5, device=rank)
net = torch.nn.Linear(1, 5).to(rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(net),
device_ids=[rank]
)
dummy_ddp_model = DistributedDataParallel(
copy.deepcopy(net),
device_ids=[rank]
)
optimizer = torch.optim.SGD(ddp_model.parameters(), lr=learning_rate)
ddp_model.register_comm_hook(hook_state, hook)
ddp_model.train()
for _ in range(10):
optimizer.zero_grad()
out = ddp_model(input)
loss = F.mse_loss(out, target)
loss.backward()
optimizer.step()
state = {
'state_dict': ddp_model.state_dict(),
'comm_hook': hook,
'comm_hook_state': hook_state
}
if rank == 0:
with self.assertLogs() as captured:
torch.save(state, chkpt_file)
# Check that the logger has only one entry
self.assertEqual(len(captured.records), 1)
# Check that the logger has an expected entry
self.assertEqual(
captured.records[0].getMessage(),
"NOTE: Process group is not serializable and excluded from a saved state."
)
dist.barrier()
map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
with self.assertLogs() as captured:
checkpoint = torch.load(chkpt_file, map_location=map_location)
# Check that the logger has only one entry
self.assertEqual(len(captured.records), 1)
# Check that the logger has an expected entry
self.assertEqual(
captured.records[0].getMessage(),
"NOTE: Process group will be set to a default group (i.e. the world size).\
If a different group is desired, please set `self.process_group` after PowerSGD state is loaded."
)
dummy_ddp_model.load_state_dict(checkpoint['state_dict'])
dummy_hook = checkpoint['comm_hook']
dummy_hook_state = checkpoint['comm_hook_state']
dummy_optimizer = torch.optim.SGD(dummy_ddp_model.parameters(), lr=learning_rate)
# Check that loaded function is correct
self.assertEqual(dummy_hook.__qualname__, hook.__qualname__)
# Check that all slots' keys were restored correctly
self.assertEqual(hook_state.__slots__, dummy_hook_state.__slots__)
# Check that all slots' attributes are restored correctly
# Excluding ``process_group`` and ``rng``.
for entry in dummy_hook_state.__slots__:
if entry != "process_group" and entry != "rng":
self.assertEqual(getattr(dummy_hook_state, entry), getattr(hook_state, entry))
# Check that ``process_group`` was set to default
self.assertEqual(dummy_hook_state.process_group, _get_default_group())
# Check that a random state was restored properly:
# ``np.random.RandomState.get_state`` returns a tuple with entries:
# ``bit_generator`` - str,
# ``state.key`` - ndarray dtype[uint32],
# ``state.pos`` - int,
# ``has_gauss`` - int,
# ``gauss`` - float
# (refer to https://github.com/numpy/numpy/blob/266aad7478bc7fbcc55eea7f942a0d373b838396/numpy/random/mtrand.pyi)
# To make sure random state was restored properly, all entries should equal the original
for entry1, entry2 in zip(hook_state.rng.get_state(), dummy_hook_state.rng.get_state()):
np.testing.assert_array_equal(entry1, entry2)
dummy_ddp_model.register_comm_hook(dummy_hook_state, dummy_hook)
dummy_ddp_model.train()
for _ in range(10):
optimizer.zero_grad()
dummy_optimizer.zero_grad()
out_origin = ddp_model(input)
out_dummy = dummy_ddp_model(input)
loss_origin = F.mse_loss(out_origin, target)
loss_dummy = F.mse_loss(out_dummy, target)
loss_origin.backward()
loss_dummy.backward()
optimizer.step()
dummy_optimizer.step()
# Check that gradients after 10 epochs are the same
for orig_param, dummy_param in zip(ddp_model.parameters(), dummy_ddp_model.parameters()):
self.assertEqual(orig_param.grad, dummy_param.grad)
if rank == 0:
os.remove(chkpt_file)
@sandcastle_skip_if(
BACKEND not in DistTestCases.backend_feature["cuda"],
f"The {BACKEND} backend does not support DDP communication hook on CUDA devices"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_ddp_hook_pickling_powerSGD(self):
hook = powerSGD.powerSGD_hook
powersgd_state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=4,
)
self._test_hook_pickling(hook, powersgd_state)
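# Materialize concrete test methods for any @parametrize-decorated tests
# defined on the test class above.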
instantiate_parametrized_tests(DistributedTest._DistTestBase)
| pytorch-master | torch/testing/_internal/distributed/distributed_test.py |
#!/usr/bin/env python3
import os
import sys
import unittest
from typing import Dict, List, Type
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
find_free_port,
IS_SANDCASTLE,
)
from torch.testing._internal.distributed.ddp_under_dist_autograd_test import (
CudaDdpComparisonTest,
DdpComparisonTest,
DdpUnderDistAutogradTest,
)
from torch.testing._internal.distributed.pipe_with_ddp_test import (
PipeWithDDPTest,
)
from torch.testing._internal.distributed.nn.api.remote_module_test import (
CudaRemoteModuleTest,
RemoteModuleTest,
ThreeWorkersRemoteModuleTest,
)
from torch.testing._internal.distributed.rpc.dist_autograd_test import (
DistAutogradTest,
CudaDistAutogradTest,
FaultyAgentDistAutogradTest,
TensorPipeAgentDistAutogradTest,
TensorPipeCudaDistAutogradTest
)
from torch.testing._internal.distributed.rpc.dist_optimizer_test import (
DistOptimizerTest,
)
from torch.testing._internal.distributed.rpc.jit.dist_autograd_test import (
JitDistAutogradTest,
)
from torch.testing._internal.distributed.rpc.jit.rpc_test import JitRpcTest
from torch.testing._internal.distributed.rpc.jit.rpc_test_faulty import (
JitFaultyAgentRpcTest,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc.faulty_agent_rpc_test import (
FaultyAgentRpcTest,
)
from torch.testing._internal.distributed.rpc.rpc_test import (
CudaRpcTest,
RpcTest,
TensorPipeAgentRpcTest,
TensorPipeAgentCudaRpcTest,
)
from torch.testing._internal.distributed.rpc.examples.parameter_server_test import ParameterServerTest
from torch.testing._internal.distributed.rpc.examples.reinforcement_learning_rpc_test import (
ReinforcementLearningRpcTest,
)
def _check_and_set_tcp_init():
# if we are running with TCP init, set master address and port
# before spawning subprocesses, since different processes could find
# different ports.
use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
if use_tcp_init == "1":
os.environ["MASTER_ADDR"] = '127.0.0.1'
os.environ["MASTER_PORT"] = str(find_free_port())
def _check_and_unset_tcp_init():
use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
if use_tcp_init == "1":
del os.environ["MASTER_ADDR"]
del os.environ["MASTER_PORT"]
# The tests for the RPC module need to cover multiple possible combinations:
# - different aspects of the API, each one having its own suite of tests;
# - different agents (ProcessGroup, TensorPipe, ...);
# To avoid a combinatorial explosion in code size, and to prevent forgetting to
# add a combination, these are generated automatically by the code in this file.
# Here, we collect all the test suites that we need to cover.
# We then have one separate file for each agent, from which
# we call the generate_tests function of this file, passing to it a fixture for
# the agent, which then gets mixed-in with each test suite.
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN, "Skip ASAN as torch + multiprocessing spawn have known issues"
)
class SpawnHelper(MultiProcessTestCase):
def setUp(self):
super().setUp()
_check_and_set_tcp_init()
self._spawn_processes()
def tearDown(self):
_check_and_unset_tcp_init()
super().tearDown()
# This list contains test suites that are agent-agnostic and that only verify
# compliance with the generic RPC interface specification. These tests should
# *not* make use of implementation details of a specific agent (options,
# attributes, ...). These test suites will be instantiated multiple times, once
# for each agent (except the faulty agent, which is special).
GENERIC_TESTS = [
RpcTest,
ParameterServerTest,
DistAutogradTest,
DistOptimizerTest,
JitRpcTest,
JitDistAutogradTest,
RemoteModuleTest,
ThreeWorkersRemoteModuleTest,
DdpUnderDistAutogradTest,
DdpComparisonTest,
ReinforcementLearningRpcTest,
]
GENERIC_CUDA_TESTS = [
CudaRpcTest,
CudaDistAutogradTest,
CudaRemoteModuleTest,
CudaDdpComparisonTest,
PipeWithDDPTest,
]
# This list contains test suites that will only be run on the TensorPipeAgent.
# These suites should be standalone, and separate from the ones in the generic
# list (not subclasses of those!).
TENSORPIPE_TESTS = [
TensorPipeAgentRpcTest,
TensorPipeAgentDistAutogradTest,
]
TENSORPIPE_CUDA_TESTS = [
TensorPipeAgentCudaRpcTest,
TensorPipeCudaDistAutogradTest,
]
# This list contains test suites that will only be run on the faulty RPC agent.
# That agent is special as it's only used to perform fault injection in order to
# verify the error handling behavior. Thus the faulty agent will only run the
# suites in this list, which were designed to test such behaviors, and not the
# ones in the generic list.
FAULTY_AGENT_TESTS = [
FaultyAgentRpcTest,
FaultyAgentDistAutogradTest,
JitFaultyAgentRpcTest,
]
def generate_tests(
prefix: str,
mixin: Type[RpcAgentTestFixture],
tests: List[Type[RpcAgentTestFixture]],
module_name: str,
) -> Dict[str, Type[RpcAgentTestFixture]]:
"""Mix in the classes needed to autogenerate the tests based on the params.
Takes a series of test suites, each written against a "generic" agent (i.e.,
derived from the abstract RpcAgentTestFixture class), as the `tests` args.
Takes a concrete subclass of RpcAgentTestFixture, which specializes it for a
certain agent, as the `mixin` arg. Produces all combinations of them.
Returns a dictionary of class names to class type
objects which can be inserted into the global namespace of the calling
module. The name of each test will be a concatenation of the `prefix` arg
and the original name of the test suite.
The `module_name` should be the name of the calling module so
that the classes can be fixed to make it look like they belong to it, which
is necessary for pickling to work on them.
"""
ret: Dict[str, Type[RpcAgentTestFixture]] = {}
for test_class in tests:
if IS_SANDCASTLE and TEST_WITH_DEV_DBG_ASAN:
print(
f'Skipping test {test_class} on sandcastle for the following reason: '
'Skip dev-asan as torch + multiprocessing spawn have known issues', file=sys.stderr)
continue
name = f"{prefix}{test_class.__name__}"
class_ = type(name, (test_class, mixin, SpawnHelper), dict())
class_.__module__ = module_name
ret[name] = class_
return ret
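# Illustrative usage sketch (not part of the original module): a per-agent test
# file imports a concrete RpcAgentTestFixture subclass for its agent, calls
# generate_tests with the relevant suite lists, and injects the generated
# classes into its module namespace so unittest discovery can find them. The
# _DemoAgentFixture/_DemoSuite classes below are hypothetical stand-ins used
# only to show the call pattern.
if __name__ == "__main__":
    class _DemoAgentFixture(RpcAgentTestFixture):
        # Stand-in for a concrete agent fixture (e.g. one for TensorPipe).
        pass

    class _DemoSuite(RpcAgentTestFixture):
        # Stand-in for a generic, agent-agnostic test suite.
        def test_noop(self):
            pass

    generated = generate_tests("Demo", _DemoAgentFixture, [_DemoSuite], __name__)
    # Real callers inject the generated classes (here "Demo_DemoSuite") into
    # their module namespace so that unittest can discover and run them.
    globals().update(generated)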
| pytorch-master | torch/testing/_internal/distributed/rpc_utils.py |
pytorch-master | torch/testing/_internal/distributed/pipeline/__init__.py |
|
pytorch-master | torch/testing/_internal/distributed/nn/__init__.py |
|
pytorch-master | torch/testing/_internal/distributed/nn/api/__init__.py |
|
#!/usr/bin/python3
import enum
from typing import Tuple
import torch
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils as dist_utils
from torch import Tensor, nn
from torch._jit_internal import Future
from torch.distributed.nn import RemoteModule
from torch.distributed.nn.api.remote_module import _REMOTE_MODULE_PICKLED_ATTRIBUTES
from torch.distributed.nn.api.remote_module import _RemoteModule
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import TemporaryFileName
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
_PARAM_VAL = torch.nn.Parameter(torch.ones(1))
# RPC handler for querying the device on the destination worker.
def remote_device(module_rref):
for param in module_rref.local_value().parameters():
return param.device
# RPC handler for querying __dict__ on the destination worker.
def remote_module_attributes(remote_module):
return remote_module.__dict__
# RPC handler for running forward on the destination worker.
def remote_forward(remote_module, args):
return remote_module.forward(*args)
# RPC handler for running forward_async on the destination worker.
def remote_forward_async(remote_module, args):
# Since a Future cannot be pickled and sent over the RPC layer,
# wait here and behave just like ``forward_sync``.
return remote_module.forward_async(*args).wait()
# RPC handler for getting training mode on the destination worker.
def get_remote_training_arg(module_rref):
return module_rref.local_value().training
class ModuleCreationMode(enum.Enum):
MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface"
MODULE_CTOR = "module_ctor"
@torch.jit.interface
class MyModuleInterface:
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> Tuple[str, int, Tensor]:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
@torch.jit.interface
class RemoteMyModuleInterface:
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> Tuple[str, int, Tensor]:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
def forward_async(
self, tensor: Tensor, number: int, word: str = "default"
) -> Future[Tuple[str, int, Tensor]]:
pass
class MyModule(nn.Module):
def __init__(self, first_arg, first_kwarg=-1):
super().__init__()
self.param1 = _PARAM_VAL
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> Tuple[str, int, Tensor]:
return word, number, tensor
class BadModule:
def __init__(self, first_arg, first_kwarg=-1):
pass
def create_scripted_module(first_arg, first_kwarg=-1):
module = MyModule(first_arg, first_kwarg=first_kwarg)
scripted_module = torch.jit.script(module)
return scripted_module
# Common utils for both CPU and CUDA test suites
class CommonRemoteModuleTest(RpcAgentTestFixture):
@property
def world_size(self): # Override setting in RpcAgentTestFixture
return 2
@staticmethod
def _create_remote_module_iter(remote_device, modes=None):
if modes is None:
modes = ModuleCreationMode.__members__.values()
args = (1,)
kwargs = dict(first_kwarg=2)
if ModuleCreationMode.MODULE_CTOR in modes:
remote_module = RemoteModule(remote_device, MyModule, args, kwargs)
yield remote_module
if ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes:
remote_module = _RemoteModule(
remote_device,
create_scripted_module,
args,
kwargs,
_module_interface_cls=MyModuleInterface,
)
scripted_remote_module = torch.jit.script(remote_module)
yield scripted_remote_module
class RemoteModuleTest(CommonRemoteModuleTest):
@dist_utils.dist_init
def test_bad_module(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
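# RemoteModule takes the remote device in "<workername>/<device>" format,
# e.g. "worker1/cpu" or "worker1/cuda:0".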
remote_device = "{}/cpu".format(dst_worker_name)
args = (1,)
kwargs = dict(first_kwarg=2)
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
@dist_utils.dist_init
def test_forward_async(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret_fut = remote_module.forward_async(*args)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args)))
@dist_utils.dist_init
def test_forward_async_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
)
)
@torch.jit.script
def run_forward_async(scripted_remote_module: RemoteMyModuleInterface):
ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3")
ret = ret_fut.wait()
return ret
ret = run_forward_async(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
@dist_utils.dist_init
def test_forward_sync(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret = remote_module.forward(*args)
self.assertEqual(ret, tuple(reversed(args)))
@dist_utils.dist_init
def test_forward_sync_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
)
)
@torch.jit.script
def run_forward(scripted_remote_module: MyModuleInterface):
ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
return ret
ret = run_forward(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
@dist_utils.dist_init
def test_forward_with_kwargs(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2)
kwargs = dict(word="3")
# Only test Python nn.Module, because script module methods don't support taking kwargs.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
ret_fut = remote_module.forward_async(*args, **kwargs)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args + ("3",))))
ret = remote_module.forward(*args, **kwargs)
self.assertEqual(ret, tuple(reversed(args + ("3",))))
@dist_utils.dist_init
def test_remote_parameters(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# Only test Python nn.Module, because script module methods don't support ``remote_parameters``.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
param_rrefs = remote_module.remote_parameters()
self.assertEqual(len(param_rrefs), 1)
self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL))
@dist_utils.dist_init
def test_get_module_rref(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# Only test Python nn.Module, because script module methods don't support ``get_module_rref``.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
rref = remote_module.get_module_rref()
self.assertEqual(rref, remote_module.module_rref)
for param in rref.to_here().parameters():
self.assertTrue(torch.equal(param, _PARAM_VAL))
@dist_utils.dist_init
def test_train_eval(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
remote_module.train()
ret1 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
self.assertEqual(ret1, True)
remote_module.eval()
ret2 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
self.assertEqual(ret2, False)
@dist_utils.dist_init
def test_unsupported_methods(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
with self.assertRaisesRegex(
ValueError, r"Method ``register_buffer`` not supported for RemoteModule"
):
remote_module.register_buffer("buffer", torch.ones(5))
with self.assertRaisesRegex(
ValueError,
r"Method ``register_parameter`` not supported for RemoteModule",
):
remote_module.register_parameter(
"param", torch.nn.Parameter(torch.ones(1))
)
with self.assertRaisesRegex(
ValueError, r"Method ``add_module`` not supported for RemoteModule"
):
remote_module.add_module("empty", None)
with self.assertRaisesRegex(
ValueError, r"Method ``apply`` not supported for RemoteModule"
):
fn = torch.rand((3, 3), requires_grad=False)
remote_module.apply(fn)
with self.assertRaisesRegex(
ValueError, r"Method ``cuda`` not supported for RemoteModule"
):
remote_module.cuda()
with self.assertRaisesRegex(
ValueError, r"Method ``cpu`` not supported for RemoteModule"
):
remote_module.cpu()
with self.assertRaisesRegex(
ValueError, r"Method ``type`` not supported for RemoteModule"
):
remote_module.type(torch.FloatTensor)
with self.assertRaisesRegex(
ValueError, r"Method ``float`` not supported for RemoteModule"
):
remote_module.float()
with self.assertRaisesRegex(
ValueError, r"Method ``double`` not supported for RemoteModule"
):
remote_module.double()
with self.assertRaisesRegex(
ValueError, r"Method ``bfloat16`` not supported for RemoteModule"
):
remote_module.bfloat16()
with self.assertRaisesRegex(
ValueError, r"Method ``to`` not supported for RemoteModule"
):
remote_module.to("cpu", dtype=torch.int32)
def hook(module, grad_input, grad_output):
pass
with self.assertRaisesRegex(
ValueError,
r"Method ``register_backward_hook`` not supported for RemoteModule",
):
remote_module.register_backward_hook(hook)
with self.assertRaisesRegex(
ValueError,
r"Method ``register_forward_pre_hook`` not supported for RemoteModule",
):
remote_module.register_forward_pre_hook(hook)
with self.assertRaisesRegex(
ValueError,
r"Method ``register_forward_hook`` not supported for RemoteModule",
):
remote_module.register_forward_hook(hook)
with self.assertRaisesRegex(
ValueError, r"Method ``state_dict`` not supported for RemoteModule"
):
remote_module.state_dict()
with self.assertRaisesRegex(
ValueError, r"Method ``load_state_dict`` not supported for RemoteModule"
):
remote_module.load_state_dict({})
with self.assertRaisesRegex(
ValueError,
r"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead.",
):
remote_module.parameters()
with self.assertRaisesRegex(
ValueError,
r"Method ``named_parameters`` not supported for RemoteModule",
):
remote_module.named_parameters()
with self.assertRaisesRegex(
ValueError, r"Method ``buffers`` not supported for RemoteModule"
):
remote_module.buffers()
with self.assertRaisesRegex(
ValueError, r"Method ``named_buffers`` not supported for RemoteModule"
):
remote_module.named_buffers()
with self.assertRaisesRegex(
ValueError, r"Method ``children`` not supported for RemoteModule"
):
remote_module.children()
with self.assertRaisesRegex(
ValueError, r"Method ``named_children`` not supported for RemoteModule"
):
remote_module.named_children()
with self.assertRaisesRegex(
ValueError, r"Method ``modules`` not supported for RemoteModule"
):
remote_module.modules()
with self.assertRaisesRegex(
ValueError, r"Method ``named_modules`` not supported for RemoteModule"
):
remote_module.named_modules()
with self.assertRaisesRegex(
ValueError, r"Method ``requires_grad_`` not supported for RemoteModule"
):
remote_module.requires_grad_()
with self.assertRaisesRegex(
ValueError, r"Method ``zero_grad`` not supported for RemoteModule"
):
remote_module.zero_grad()
with self.assertRaisesRegex(
ValueError, r"Method ``share_memory`` not supported for RemoteModule"
):
remote_module.share_memory()
with self.assertRaisesRegex(
ValueError, r"Method ``extra_repr`` not supported for RemoteModule"
):
remote_module.extra_repr()
@dist_utils.dist_init
def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# If a new attribute is added to this RemoteModule after the initialization,
# and the RemoteModule is then sent over the wire by RPC,
# this new field will not be pickled, because it's not specified in _REMOTE_MODULE_PICKLED_ATTRIBUTES.
# Note that adding a new attribute out of constructor should rarely happen.
# If a new attribute is added to RemoteModule constructor,
# there is a sanity check to enforce developers to add this attribute to either
# _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
new_attr_name = "new_attr"
setattr(remote_module, new_attr_name, 1)
attrs = rpc.rpc_sync(
dst_worker_name, remote_module_attributes, (remote_module,)
)
self.assertNotIn(new_attr_name, attrs)
@dist_utils.dist_init
def test_remote_module_py_pickle_not_supported(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
with TemporaryFileName() as fname:
with self.assertRaisesRegex(
RuntimeError,
"Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC",
):
torch.save(remote_module, fname)
@dist_utils.dist_init
def test_remote_module_py_pickle_not_supported_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
):
with TemporaryFileName() as fname:
with self.assertRaisesRegex(torch.jit.Error, "can only be pickled when using RPC"):
torch.save(remote_module, fname)
class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
@property
def world_size(self): # Override setting in CommonRemoteModuleTest
return 3
@dist_utils.dist_init
def test_send_remote_module_over_the_wire(self):
if self.rank != 0:
return
dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
        # Unpickled attributes include both the inherent attributes of RemoteModule
        # (not inherited from the superclass) and two installed methods.
expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
expected_unpickled_attrs.append("forward_async")
expected_unpickled_attrs.append("forward")
# Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
for remote_module in self._create_remote_module_iter(
dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
# Test querying some simple attributes from worker2.
attrs = rpc.rpc_sync(
dst_worker2_name, remote_module_attributes, (remote_module,)
)
self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs)
self.assertEqual(attrs["on"], "worker1")
self.assertEqual(attrs["device"], "cpu")
self.assertFalse(attrs["is_device_map_set"])
self.assertFalse(attrs["is_scriptable"])
            # Test that the methods installed on worker1 can be invoked by worker2 over the RPC layer.
            # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward`` or ``forward_async``,
            # rather than having another worker initiate ``forward`` over the RPC layer.
args = (torch.ones(1), 2, "3")
ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
self.assertEqual(ret1, tuple(reversed(args)))
ret2 = rpc.rpc_sync(
dst_worker2_name, remote_forward_async, (remote_module, args)
)
self.assertEqual(ret2, tuple(reversed(args)))
@dist_utils.dist_init
def test_send_remote_module_over_the_wire_script_not_supported(self):
if self.rank != 0:
return
dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
        # Unpickled attributes include both the inherent attributes of RemoteModule
        # (not inherited from the superclass) and two installed methods.
expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
expected_unpickled_attrs.append("forward_async")
expected_unpickled_attrs.append("forward")
with self.assertRaisesRegex(
RuntimeError, "Passing a script RemoteModule over RPC is not supported."
):
# Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
for remote_module in self._create_remote_module_iter(
dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
):
# Test querying some simple attributes from worker2.
attrs = rpc.rpc_sync(
dst_worker2_name, remote_module_attributes, (remote_module,)
)
@dist_utils.dist_init
def test_create_remote_module_from_module_rref(self):
if self.rank != 0:
return
dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
# Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer.
for remote_module in self._create_remote_module_iter(
dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
remote_module2 = rpc.rpc_sync(
dst_worker2_name,
RemoteModule.init_from_module_rref,
(dst_worker2_name, remote_module.get_module_rref()),
)
args = (torch.ones(1), 2, "3")
ret1 = rpc.rpc_sync(
dst_worker1_name, remote_forward, (remote_module, args)
)
ret2 = rpc.rpc_sync(
dst_worker2_name, remote_forward, (remote_module2, args)
)
            self.assertEqual(ret1, ret2)
class CudaRemoteModuleTest(CommonRemoteModuleTest):
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_valid_device(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = dist_utils.worker_name(dst_rank)
for remote_module in self._create_remote_module_iter(
"{}/cuda:0".format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR]
):
device = rpc.rpc_sync(
dst_worker_name, remote_device, (remote_module.module_rref,)
)
self.assertEqual(device.type, "cuda")
self.assertEqual(device.index, 0)
# Test rank works as well.
for remote_module in self._create_remote_module_iter(
"rank:{}/cuda:0".format(dst_rank), modes=[ModuleCreationMode.MODULE_CTOR]
):
device = rpc.rpc_sync(
dst_worker_name, remote_device, (remote_module.module_rref,)
)
self.assertEqual(device.type, "cuda")
self.assertEqual(device.index, 0)
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_invalid_devices(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError,
r"Expected one of .+ device type at start of device string",
):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/foo".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(
RuntimeError, r"CUDA error: invalid device ordinal"
):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/cuda:100".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(RuntimeError, r"Invalid device string: 'cpu2'"):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/cpu2".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(RuntimeError, r"Device string must not be empty"):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(
ValueError,
r"Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '<workername>/<device>'",
):
list(
m.forward()
for m in self._create_remote_module_iter(
"{}/cuda:0/cuda:1".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(
ValueError,
r"Could not parse remote_device: /. The valid format is '<workername>/<device>'",
):
list(
m.forward()
for m in self._create_remote_module_iter(
"/",
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
with self.assertRaisesRegex(
ValueError,
r"Could not parse remote_device: /cuda:0. The valid format is '<workername>/<device>'",
):
list(
m.forward()
for m in self._create_remote_module_iter(
"/cuda:0",
modes=[ModuleCreationMode.MODULE_CTOR],
)
)
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_input_moved_to_cuda_device(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# These two CPU tensors (in args and kwargs) should be implicitly moved to an appropriate cuda device.
t1 = torch.ones(1)
args = (t1, 2)
t2 = t1 * 2
kwargs = dict(word=t2)
# Only test Python nn.Module, because script module methods don't support taking kwargs.
for remote_module in self._create_remote_module_iter(
"{}/cuda:0".format(dst_worker_name), modes=[ModuleCreationMode.MODULE_CTOR]
):
ret_fut = remote_module.forward_async(*args, **kwargs)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args + (t2,))))
# TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
self.assertEqual(ret[0].device.type, "cpu")
self.assertEqual(ret[2].device.type, "cpu")
ret = remote_module.forward(*args, **kwargs)
self.assertEqual(ret, tuple(reversed(args + (t2,))))
# TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
self.assertEqual(ret[0].device.type, "cpu")
self.assertEqual(ret[2].device.type, "cpu")
@skip_if_lt_x_gpu(1)
@dist_utils.dist_init
def test_input_moved_to_cuda_device_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
"{}/cuda:0".format(dst_worker_name),
modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE],
)
)
@torch.jit.script
def run_forward(scripted_remote_module: MyModuleInterface):
ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
return ret
ret = run_forward(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
# TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
self.assertEqual(ret[2].device.type, "cpu")
| pytorch-master | torch/testing/_internal/distributed/nn/api/remote_module_test.py |
import torch
import torch.nn as nn
from torch.distributed._shard.sharded_tensor import ShardedTensor
class SimpleMegatronLM(nn.Module):
def __init__(self, linear_size, rank=None, dtype=torch.float32):
super().__init__()
self.fc1 = nn.Linear(*linear_size[0], dtype=dtype)
self.gelu = nn.GELU()
self.fc2 = nn.Linear(*linear_size[1], dtype=dtype)
if rank is not None:
self.fc1.cuda(rank)
self.fc2.cuda(rank)
def forward(self, inp):
return self.fc2(self.gelu(self.fc1(inp)))
def get_weights(self):
if isinstance(self.fc1.weight, ShardedTensor):
weight1 = self.fc1.weight.local_tensor()
else:
weight1 = self.fc1.weight
if isinstance(self.fc2.weight, ShardedTensor):
weight2 = self.fc2.weight.local_tensor()
else:
weight2 = self.fc2.weight
return (weight1, weight2)
def get_biases(self):
return (self.fc1.bias, self.fc2.bias)
def get_weight_grads(self):
return (self.fc1.weight.grad, self.fc2.weight.grad)
def get_bias_grads(self):
return (self.fc1.bias.grad, self.fc2.bias.grad)
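# A minimal usage sketch (hypothetical, not part of the original helpers):
# linear_size is a pair of (in_features, out_features) tuples, one per fc layer.
def _example_simple_megatron_lm_usage():
    # Build a 32 -> 16 -> 8 stack on CPU and run a forward pass.
    model = SimpleMegatronLM([(32, 16), (16, 8)])
    out = model(torch.rand(4, 32))
    assert out.shape == (4, 8)
    return out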
| pytorch-master | torch/testing/_internal/distributed/_shard/test_common.py |
pytorch-master | torch/testing/_internal/distributed/_shard/__init__.py |
|
import sys
from functools import wraps, partial
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
TEST_SKIPS,
tp_transports,
)
TEST_GPU_NUM = 4
class ShardedTensorTestBase(MultiProcessTestCase):
@property
def world_size(self):
return TEST_GPU_NUM
def init_pg(self, backend="nccl"):
if backend not in ["nccl", "gloo", "mpi"]:
raise RuntimeError(f"Backend {backend} not supported!")
dist.init_process_group(
backend=backend,
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
# set device for nccl pg for collectives
if backend == "nccl":
torch.cuda.set_device(self.rank)
def init_rpc(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports())
rpc_backend_options.init_method = f"file://{self.file_name}"
for rank in range(self.world_size):
rpc_backend_options.set_device_map(
f"worker{rank}", {rank: self.rank, self.rank: rank}
)
rpc.init_rpc(
name="worker%d" % self.rank,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
def init_comms(self, init_rpc=True, backend="nccl"):
if init_rpc:
self.init_rpc()
self.init_pg(backend=backend)
def destroy_comms(self, destroy_rpc=True):
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
if destroy_rpc:
rpc.shutdown()
dist.destroy_process_group()
def setUp(self) -> None:
super().setUp()
self._spawn_processes()
def assert_sharded_tensor_equal(self, st1, st2):
st1_local_shards = st1.local_shards()
st2_local_shards = st2.local_shards()
self.assertEqual(len(st1_local_shards), len(st2_local_shards))
for i, st1_local_shard in enumerate(st1_local_shards):
self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor)
self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata)
self.assertEqual(st1.metadata(), st2.metadata())
self.assertEqual(st1.sharding_spec(), st2.sharding_spec())
self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards()))
# wrapper to initialize comms (processgroup + rpc)
def with_comms(func=None, init_rpc=True, backend="nccl"):
if func is None:
return partial(
with_comms,
init_rpc=init_rpc,
backend=backend,
)
@wraps(func)
def wrapper(self, *args, **kwargs):
if backend == "nccl" and torch.cuda.device_count() < self.world_size:
sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
self.init_comms(init_rpc=init_rpc, backend=backend)
func(self)
self.destroy_comms(destroy_rpc=init_rpc)
return wrapper
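# A minimal sketch (hypothetical, not part of the original module) of how a test
# class combines ShardedTensorTestBase with the with_comms decorator. The gloo
# backend is chosen here only so the sketch does not require GPUs.
class _ExampleShardedTensorTest(ShardedTensorTestBase):
    @with_comms(init_rpc=False, backend="gloo")
    def test_process_group_initialized(self):
        # By the time the body runs, init_comms() has set up the process group;
        # destroy_comms() tears it down after the body returns.
        self.assertTrue(dist.is_initialized())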
| pytorch-master | torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py |
import builtins
import torch
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
)
from torch.distributed._shard.sharding_spec._internals import (
get_chunked_dim_size,
get_split_size,
)
def generate_chunk_sharding_specs_for_test(sharding_dim):
return [
ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
),
# Test different ordering. (Case 1)
ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:2/cuda:2",
"rank:3/cuda:3",
"rank:0/cuda:0",
"rank:1/cuda:1",
],
),
# Test different ordering. (Case 2)
ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:3/cuda:3",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
],
),
]
def generate_enumerable_sharding_specs_for_test():
return [
EnumerableShardingSpec(
[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
),
]
)
]
def generate_local_weight_sharding_params_for_test(
local_weight, sharded_dim, gpu_num, spec, rank
):
"""
    Shard the local weight based on the given spec, so we can compare against
    the one from the sharded tensor.
Args:
local_weight: weight matrix to be sharded.
sharded_dim: The dimension which we shard on.
gpu_num: number of ranks.
        spec: sharding spec.
        rank: rank of the current CUDA process.
Returns:
start_pos: start position of sharded weight on the given rank.
chunk_size: chunk size of sharded weight on the given rank.
"""
sharding_dim_size = local_weight.size(sharded_dim)
split_size = get_split_size(sharding_dim_size, gpu_num)
current_offsets = 0
start_pos = current_offsets
for idx, placement in enumerate(spec.placements):
chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
if rank == placement.rank():
start_pos = current_offsets
break
current_offsets += chunk_size
return start_pos, chunk_size
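# Worked example (illustrative, not part of the original helpers): for a weight
# with 10 rows sharded along dim 0 over 4 ranks using the in-order placement
# spec, get_split_size(10, 4) == 3, so the (start_pos, chunk_size) pairs are
# rank 0 -> (0, 3), rank 1 -> (3, 3), rank 2 -> (6, 3) and rank 3 -> (9, 1).
def _example_local_weight_sharding_params():
    spec = generate_chunk_sharding_specs_for_test(0)[0]  # placements rank:0..3 in order
    weight = torch.rand(10, 16)
    return [
        generate_local_weight_sharding_params_for_test(weight, 0, 4, spec, rank)
        for rank in range(4)
    ]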
def clone_module_parameter(module, param_name):
"""
Clone a parameter from a given existing module.
Args:
module (:class:`torch.nn.Module`): Module whose parameter needs to be cloned.
param_name (str): Name of the parameter of ``module`` that needs to be cloned.
Returns: cloned tensor as :class:`torch.nn.Parameter`.
"""
tensor = getattr(module, param_name)
return torch.nn.Parameter(tensor.detach().clone())
def gen_binary_op_func(python_op, inplace=False):
src_lines = ['def f(lhs, rhs):']
if "torch" in python_op:
src_lines.append(f' return {python_op}(lhs, rhs)\n')
elif inplace:
src_lines.append(f' lhs {python_op}= rhs\n return lhs\n')
else:
src_lines.append(f' return lhs {python_op} rhs\n')
code_str = '\n'.join(src_lines)
g = {'torch': torch}
builtins.exec(code_str, g)
return g["f"]
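# A minimal usage sketch (hypothetical, not part of the original helpers)
# exercising the three code paths of gen_binary_op_func above.
def _example_gen_binary_op_func_usage():
    add_fn = gen_binary_op_func("+")                  # returns lhs + rhs
    iadd_fn = gen_binary_op_func("+", inplace=True)   # lhs += rhs, returns lhs
    torch_add_fn = gen_binary_op_func("torch.add")    # returns torch.add(lhs, rhs)
    lhs, rhs = torch.ones(2, 2), torch.ones(2, 2)
    assert torch.equal(add_fn(lhs, rhs), torch_add_fn(lhs, rhs))
    assert torch.equal(iadd_fn(lhs.clone(), rhs), torch.full((2, 2), 2.0))
    return add_fn, iadd_fn, torch_add_fn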
| pytorch-master | torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py |
import copy
import random
import torch
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
)
PLACEMENTS = [
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
]
DEFAULT_GPU_NUM = 4
def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0):
spec_list = []
for i in range(len(sharding_dims)):
random.Random(seed + i).shuffle(PLACEMENTS)
spec_list.append(
ChunkShardingSpec(
dim=sharding_dims[i],
placements=copy.deepcopy(PLACEMENTS),
)
)
return spec_list
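# Illustrative note (an observation, not part of the original helpers): the
# seeded shuffle above mutates the module-level PLACEMENTS list in place; since
# every rank runs the same test code, the shuffles stay deterministic and all
# ranks end up with identical placement orders. For example,
# _chunk_sharding_specs_list_for_test([0, 1], seed=5) builds one spec sharded on
# dim 0 and one on dim 1, each with its own shuffled copy of the placements.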
class MyShardedModel2(torch.nn.Module):
def __init__(
self,
spec=None,
group=None,
init_rrefs=True
) -> None:
super(MyShardedModel2, self).__init__()
if spec is not None:
self.sharded_tensor2 = sharded_tensor.rand(
spec, 10, 20, process_group=group, init_rrefs=init_rrefs
)
else:
self.sharded_tensor2 = None
self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2))
class MyShardedModel1(torch.nn.Module):
def __init__(
self,
spec=None,
group=None,
init_rrefs=True
) -> None:
super(MyShardedModel1, self).__init__()
if spec is not None:
self.sharded_tensor1 = sharded_tensor.rand(
spec, 10, 20, process_group=group, init_rrefs=init_rrefs
)
else:
self.sharded_tensor1 = None
self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2))
self.submodule = MyShardedModel2(spec, group, init_rrefs)
| pytorch-master | torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py |
import torch.distributed.rpc as rpc
import torch.distributed.rpc._testing # noqa: F401
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
# The following message types are currently retried in the RREF protocol and
# distributed autograd. Thus only these messages should be tested with the
# Faulty RPC Agent.
retryable_message_types = ["RREF_FORK_REQUEST",
"RREF_CHILD_ACCEPT",
"RREF_USER_DELETE",
"CLEANUP_AUTOGRAD_CONTEXT_REQ"]
# The following messages incur the corresponding delay in seconds while being
# processed in FaultyTensorPipeAgent's enqueueSend() function.
default_messages_to_delay = {
"PYTHON_CALL": 1.5, # Python UDF
"SCRIPT_CALL": 1.5, # Script/Builtin
}
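# Illustrative note (not part of the original fixture): individual tests
# override these defaults through the dist_init decorator from
# torch.testing._internal.dist_utils, e.g.
#   @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
# which is forwarded to setup_fault_injection() below.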
class FaultyRpcAgentTestFixture(RpcAgentTestFixture):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.messages_to_fail = retryable_message_types
self.messages_to_delay = default_messages_to_delay
@property
def rpc_backend(self):
return rpc.backend_registry.BackendType[
"FAULTY_TENSORPIPE"
]
@property
def rpc_backend_options(self):
return rpc.backend_registry.construct_rpc_backend_options(
self.rpc_backend,
init_method=self.init_method,
num_worker_threads=8,
num_fail_sends=3,
messages_to_fail=self.messages_to_fail,
messages_to_delay=self.messages_to_delay,
)
def setup_fault_injection(self, faulty_messages, messages_to_delay):
if faulty_messages is not None:
self.messages_to_fail = faulty_messages
if messages_to_delay is not None:
self.messages_to_delay = messages_to_delay
def get_shutdown_error_regex(self):
error_regexes = [
"Exception in thread pool task",
"Connection reset by peer",
"Connection closed by peer"
]
return "|".join(["({})".format(error_str) for error_str in error_regexes])
def get_timeout_error_regex(self):
return "RPC ran for more than"
| pytorch-master | torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py |
import torch
import time
import torch.distributed.rpc as rpc
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs
from torch.testing._internal.dist_utils import (
dist_init,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
def add_rref_to_value(rref, value):
return rref.to_here() + value
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
# on the owning nodes, this is expected because the OwnerRRef was never
# successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
        # NOTE: we only test this when not sending to self, as to_here()
        # calls localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# PYTHON_CALL message types which correspond to Python UDF over RPC
# by default get a delay (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
| pytorch-master | torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py |
pytorch-master | torch/testing/_internal/distributed/rpc/__init__.py |
|
import sys
import threading
import time
from enum import Enum
import random
import torch
import torch.nn as nn
from datetime import timedelta
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributed.rpc import RRef
from torch.testing._internal.common_utils import IS_MACOS, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
initialize_pg,
wait_until_node_failure,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
# Right now we test up to 3-layer nested rpc calls.
# rpc_done[1] and ctx_ids[1] indicate that the rpc on the previous rank is done
# and hold the context id sent from that rank, respectively.
# rpc_done[2] and ctx_ids[2] do the same for the rank two hops back.
# rpc_done[3] and ctx_ids[3] do the same for the rank three hops back.
# rpc_done[0] and ctx_ids[0] are for the current rank, but are mostly unused.
rpc_done = [False, False, False, False]
ctx_ids = [-1, -1, -1, -1]
known_context_ids = set()
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
def _set_rpc_done(ctx_id, rank_distance):
global rpc_done
global ctx_ids
global known_context_ids
rpc_done[rank_distance] = True
ctx_ids[rank_distance] = ctx_id
known_context_ids.add(ctx_id)
def _check_rpc_done(rank_distance):
while not rpc_done[rank_distance]:
time.sleep(0.1)
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner, and verifies that the grad of
# the rref tensor equals the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
x = grads[rref.local_value()]
if x.is_sparse:
assert grad.is_sparse
x = x.to_dense()
grad = grad.to_dense()
else:
assert not grad.is_sparse
return torch.equal(x, grad)
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
def build_sparse_tensor(coalesce=False, requires_grad=True, dtype=torch.float32):
i = [[0, 1, 1], [2, 0, 2]]
v = [3.2, 4.1, 5.3]
tensor = torch.sparse_coo_tensor(i, v, (3, 3), requires_grad=requires_grad, dtype=dtype)
if coalesce:
tensor = tensor.coalesce()
return tensor
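# Worked example (illustrative, not part of the original tests): the sparse
# tensor built above has non-zeros at (0, 2)=3.2, (1, 0)=4.1 and (1, 2)=5.3,
# i.e. its dense form is [[0, 0, 3.2], [4.1, 0, 5.3], [0, 0, 0]].
def _example_build_sparse_tensor_dense_view():
    expected = torch.tensor([[0.0, 0.0, 3.2], [4.1, 0.0, 5.3], [0.0, 0.0, 0.0]])
    dense = build_sparse_tensor(requires_grad=False).to_dense()
    assert torch.allclose(dense, expected)
    return dense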
@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
return torch.add(t1, t2)
def my_scalar_add(a, b):
return a + b
def my_rref_add(rref_t1, t2):
ret = torch.add(rref_t1.local_value(), t2)
return ret
@torch.jit.script
def my_script_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
t1 = ref_t1.to_here()
return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
def ret_requires_grad():
return requires_grad_tensor
def my_py_nested_call(t1, t2, dst, world_size, hops):
next_dst = (dst + 1) % world_size
if hops > 0:
return rpc.rpc_sync(
worker_name(next_dst),
my_py_nested_call,
args=(t1, t2, next_dst, world_size, hops - 1),
)
else:
return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
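# Example trace (illustrative): if worker0 issues
#   rpc.rpc_sync(worker_name(1), my_py_nested_call, args=(t1, t2, 1, 4, 1))
# then worker1 forwards the call to worker2 with hops=0, and worker2 sends the
# final my_py_add to worker3, so the add executes three hops away from the caller.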
# After a dist autograd context is cleaned up, it should also be cleaned up on
# the other nodes. This helper allows up to timeout_seconds for those RPCs to
# complete, and verifies that all known contexts were cleaned up in that timeframe.
def _all_contexts_cleaned_up(timeout_seconds=10):
global known_context_ids
start = time.time()
context_id_to_raised = set()
while (
time.time() - start < timeout_seconds
and context_id_to_raised != known_context_ids
):
for context_id in known_context_ids:
try:
dist_autograd._retrieve_context(context_id)
except RuntimeError:
context_id_to_raised.add(context_id)
# all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
success = context_id_to_raised == known_context_ids
return success
# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified that the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
# This function is the same as _run_trainer, except that rpc calls the TorchScript
# function "my_script_ref_add" instead of the Python function "my_rref_add".
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
class SimulateBackwardError(Function):
_simulate_error = True
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if SimulateBackwardError._simulate_error:
raise Exception("Simulate error on backward pass")
else:
return input
class ExecMode(Enum):
LOCAL = 1 # Run the operation locally.
RPC_SYNC = 2 # Run the operation using rpc_sync
REMOTE = 3 # Run the operation using remote.
RPC_ASYNC = 4 # Run the operation using rpc_async
# Common utils for both CPU and CUDA test suites
class CommonDistAutogradTest(RpcAgentTestFixture):
def _exec_func_with_dst(self, dst, exec_mode, method, *args):
if ExecMode.LOCAL == exec_mode:
if len(args) == 1 and isinstance(args[0], list):
return method(*args[0])
return method(*args)
elif ExecMode.RPC_SYNC == exec_mode:
return rpc.rpc_sync(worker_name(dst), method, args=(args))
elif ExecMode.REMOTE == exec_mode:
return rpc.remote(worker_name(dst), method, args=(args)).to_here()
elif ExecMode.RPC_ASYNC == exec_mode:
fut = rpc.rpc_async(worker_name(dst), method, args=(args))
return fut.wait()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
def _exec_func(self, exec_mode, method, *args):
return self._exec_func_with_dst(
self._next_rank(), exec_mode, method, *args
)
def _next_rank(self):
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
def _check_rpc_done(self, rank_distance):
_check_rpc_done(rank_distance)
def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
if exec_mode == ExecMode.LOCAL:
torch.autograd.backward(tensors)
return [arg.grad for arg in args]
else:
self._verify_backwards_remote(tensors, context_id, local_grads, *args)
def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
dist_autograd.backward(context_id, tensors)
# Verify grads were accumulated appropriately.
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
for i in range(0, nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
ngrads += 1
else:
self.assertNotIn(args[i], grads)
self.assertEqual(ngrads, len(grads))
def _test_graph(self, fn, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor()
t2 = build_sparse_tensor()
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), fn, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Verify graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# Verify graph for previous context id.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# autograd context should be cleaned up by now.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._retrieve_context(context_id)
# No autograd context available.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._current_context()
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
nest_dst_rank = (dst_rank + 1) % self.world_size
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
# Barrier to ensure all RPCs are done.
dist.barrier()
for rd in [1, 2, 3]:
rpc.rpc_sync(
worker_name((self.rank + rd) % self.world_size),
_set_rpc_done,
args=(context_id, rd),
)
# Barrier to ensure all set_rpc_done have completed.
dist.barrier()
            # For self.rank, there are 4 graphs to verify:
            # One is for the current context id, when this rank sends the first rpc call.
            # The second is for the prev context id, when this rank makes the 1st nested
            # call.
            # The third is for the prev prev context id, when this rank makes the
            # 2nd nested call.
            # The last is for the prev prev prev context id, when this rank
            # executes the torch.add() operator.
# Verify first graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Verify second graph for 1st nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# Verify third graph for 2nd nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[2])
self._verify_graph_for_nested_rpc_call(ctx)
# verify last graph for rpc call execution.
ctx = dist_autograd._retrieve_context(ctx_ids[3])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode, sparse):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size),
_set_rpc_done,
args=(context_id, 1),
)
            # For self.rank, there are 2 graphs to verify.
            # One is for the current context id, when this rank sends the first rpc
            # call and executes the torch.add() operator.
            # The other is for the prev context id, when this rank makes the
            # nested call.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(2, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[1],
t1,
t2,
ret,
)
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
# Verify two pairs of send and recv functions for nested
# call
self._check_rpc_done(1)
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode, sparse):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if sparse:
t1 = build_sparse_tensor(requires_grad=False)
t2 = build_sparse_tensor(requires_grad=False)
else:
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.add, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
ctx = dist_autograd._current_context()
send_functions = ctx._send_functions()
self.assertEqual(len(send_functions), 0)
recv_functions = ctx._recv_functions()
self.assertEqual(len(recv_functions), 0)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
            # NB: RRef.to_here() always passes the autograd context to the
            # callee, as the caller does not know whether the return
# value would contain a requires_grad tensor or not.
#
# rpc/remote with udf (_set_rpc_done here) also always passes the
# autograd context to the callee due to the same reason.
self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
dist.barrier()
def _test_rpc_complex_args(self, exec_mode, sparse):
with dist_autograd.context() as context_id:
num_tensors = 10
tensors = []
for i in range(num_tensors):
if sparse:
tensor = build_sparse_tensor(requires_grad=(i % 2 == 0))
else:
tensor = torch.ones(3, 3, requires_grad=(i % 2 == 0))
tensors.append(tensor)
dst_rank = self._next_rank()
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.stack, args=(tensors,)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.stack, args=(tensors,)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
self.assertEqual(torch.stack(tensors), ret)
# Verify appropriate tensors have been attached the autograd graph.
next_funcs = list(
dist_autograd._current_context()._send_functions().values()
)[0].next_functions
idx = 0
for i in range(len(next_funcs)):
self.assertEqual(
"torch::autograd::AccumulateGrad", next_funcs[i][0].name()
)
self.assertEqual(tensors[i], next_funcs[i][0].variable)
# Verify that the worker id has been recorded in the context
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(len(worker_ids), 1)
self.assertEqual(worker_ids, {dst_rank})
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
initialize_pg(self.file_init_method, self.rank, self.world_size)
# test that in dist autograd, in the case that tensors communicated over RPC do
# NOT require grad, we still cleanup the dist autograd contexts created
# on other nodes. This is because the autograd context is still
# communicated over RPC even if tensor arguments do not require grad, as
        # it is possible that the response could require grad.
if nested:
dst_rank = (self.rank + 1) % self.world_size
nested_dst_rank = (dst_rank + 1) % self.world_size
dst_ranks = {dst_rank}
else:
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
if nested:
rpc.rpc_sync(
worker_name(nested_dst_rank),
_set_rpc_done,
args=(context_id, 2),
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
def _backward_no_grad_on_tensor(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2))
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
self.assertIsNone(t1.grad)
self.assertIsNone(t2.grad)
# Now populate .grad with local autograd engine and
# verify dist autograd doesn't mess with it.
loss_local = torch.add(t1, t2)
if sparse:
loss_local = torch.sparse.sum(loss_local)
else:
loss_local = loss_local.sum()
loss_local.backward()
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
t1_grad_before = t1.grad
t2_grad_before = t2.grad
dist_autograd.backward(context_id, [loss])
self.assertEqual(t1_grad_before, t1.grad)
self.assertEqual(t2_grad_before, t2.grad)
# The current rank first creates a tensor on the rref_owner, and then passes
# the rref with another tensor to the callee to run either my_rref_add or
# my_nested_rref_add, depending on whether the callee is the rref owner.
# The grad of tensor lives on the current rank, and the grad of the rref
# tensor lives on the rref owner.
def _backward_rref(self, callee, rref_owner, t1, t2, local_grads, sparse):
local_ret = torch.add(t1, t2)
if sparse:
local_ret = torch.sparse.sum(local_ret)
else:
local_ret = local_ret.sum()
local_ret.backward()
with dist_autograd.context() as context_id:
if sparse:
rref_t1 = rpc.remote(
rref_owner, build_sparse_tensor, args=(False, True,)
)
else:
rref_t1 = rpc.remote(
rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
)
if callee == rref_owner:
rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
else:
rref = rpc.remote(
callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
)
ret = rref.to_here()
if sparse:
ret = torch.sparse.sum(ret)
else:
ret = ret.sum()
dist_autograd.backward(context_id, [ret])
# verify grads on caller
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t2, grads)
self.assertEqual(grads[t2], t2.grad)
# verify grads on rref owner
self.assertTrue(
rpc.rpc_sync(
rref_owner,
_compare_owner_value,
args=(context_id, rref_t1, t1.grad),
)
)
# In this test, every rank will serve as a parameter server (ps) and a
# driver, and then kicks off trainers on the other three ranks. So, we have:
    # ps = rank0 with trainers = rank1/2/3
    # ps = rank1 with trainers = rank2/3/0
    # ps = rank2 with trainers = rank3/0/1
    # ps = rank3 with trainers = rank0/1/2
#
# These four test ps-trainer groups run on completely separate autograd
# graphs, but they share the same set of underlying RpcAgents.
def _test_trainer_ps(self, create_ref_fn, trainer_fn, sparse):
if sparse:
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
else:
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
if sparse:
torch.sparse.sum(local_ret).backward()
else:
local_ret.sum().backward()
# create rref on self
rref_t1 = rpc.remote(
worker_name(self.rank),
create_ref_fn,
args=())
# kick off forward and backward pass on three other workers (trainers)
rank_diffs = [1, 2, 3]
futures = []
for rank_diff in rank_diffs:
futures.append(
rpc.rpc_async(
worker_name((self.rank + rank_diff) % self.world_size),
trainer_fn,
args=(rref_t1, t2, worker_name(self.rank), rank_diff, sparse),
)
)
# check if the trainers have done with their backward pass
for rank_diff in rank_diffs:
self._check_rpc_done(rank_diff)
# trainers are done and holding the context for verification
accumulate_grad_func = None
for rank_diff in rank_diffs:
# make sure grads are accumulated for the same tensors and values
# are all correct
ctx_id = ctx_ids[rank_diff]
grads = dist_autograd.get_gradients(ctx_id)
local_t1 = rref_t1.to_here()
self.assertIn(local_t1, grads)
self.assertEqual(grads[local_t1], t1.grad)
# unblock trainers
_set_rpc_done(None, 0)
# wait until all trainers are done
torch.futures.wait_all(futures)
def _backward_multiple_round_trips(self, t1, t2, t3, t4, t5, local_grads, sparse):
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
# Multiple RPCs between different nodes.
val = self._exec_func(exec_mode, torch.add, t1, t2)
val = self._exec_func(exec_mode, torch.mul, t3, val)
s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
if sparse:
val = self._exec_func(exec_mode, torch.mul, s1, s2)
val = self._exec_func(exec_mode, torch.mul, val, val)
loss = torch.sparse.sum(val)
else:
val = self._exec_func(exec_mode, torch.bmm, s1, s2)
val = self._exec_func(exec_mode, torch.matmul, val, val)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
)
local_grads = ret if ret else local_grads
def _backward_different_dtypes(self, t1, t2, sparse):
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
loss = self._exec_func(exec_mode, torch.add, t1, t2)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
# Run the same code locally and with dist autograd and verify gradients
    # are the same.
def _backward_simple_python_udf(self, t1, t2, sparse):
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, my_py_add, t1, t2)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
# Run the same code locally and with dist autograd and verify gradients
    # are the same.
def _backward_simple_script_call(self, t1, t2, sparse):
local_grads = None
for exec_mode in [
ExecMode.LOCAL,
ExecMode.RPC_SYNC,
ExecMode.RPC_ASYNC,
ExecMode.REMOTE,
]:
with dist_autograd.context() as context_id:
forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
if sparse:
loss = torch.sparse.sum(forward_ret)
else:
loss = forward_ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
def _nested_backward_accumulate_grads(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._test_nested_backward_accumulate_grads,
args=(t1, t2, self._next_rank()),
)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
# Run backward twice.
dist_autograd.backward(context_id, [loss], retain_graph=True)
dist_autograd.backward(context_id, [loss])
def _backwards_nested_python_udf(self, t1, t2, sparse):
t3 = t1 * t2
t4 = t1 + t2
res = t3 + t4
loss = t1 * t2 * t3 * t4 * res
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
torch.autograd.backward([loss])
# Now run distributed autograd.
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_python_udf,
args=(t1, t2, self._next_rank()),
)
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
dist_autograd.backward(context_id, [loss])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
def _mixed_requires_grad(self, t1, t2, sparse):
for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._mixed_requires_grad_operaton, t1, t2
)
self.assertEqual(t1 * t2, ret)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
dist_autograd.backward(context_id, [loss])
self.assertTrue(t1.requires_grad)
self.assertFalse(t2.requires_grad)
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t1, grads)
self.assertNotIn(t2, grads)
self.assertEqual(t2, grads[t1])
def _multiple_backward(self, t1, t2, sparse):
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2))
if sparse:
loss = torch.sparse.sum(loss)
else:
loss = loss.sum()
# Run backward in a loop multiple times.
for i in range(1000):
dist_autograd.backward(context_id, [loss], retain_graph=True)
# For the current context, this rank sends the t1 and t2 tensors to dst_rank,
# then gets the result tensor t3 = torch.add(t1, t2) back.
# For the current context on this rank, it expects a graph like this:
#  send function:
#              rpcSendBackward
#                 /         \
#  t1.AccumulateGrad     t2.AccumulateGrad
#
#  recv function:
#
#              |
#      t3.rpcRecvBackward
#
def _verify_graph_for_first_rpc_call(
self, send_function, recv_function, t1, t2, ret
):
# Retrieve the next functions in the graph.
next_funcs = send_function.next_functions
self.assertEqual(2, len(next_funcs))
# We should now hit t1 and t2 in the autograd graph.
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
self.assertEqual(t1, next_funcs[0][0].variable)
self.assertEqual(0, next_funcs[0][1])
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
self.assertEqual(t2, next_funcs[1][0].variable)
self.assertEqual(0, next_funcs[1][1])
# Test recv functions.
self.assertEqual(ret.grad_fn, recv_function)
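# Editor's illustration (not part of the original test suite): the same
# next_functions traversal applies to a purely local autograd graph, e.g.:
#
#   a = torch.rand(2, 2, requires_grad=True)
#   b = torch.rand(2, 2, requires_grad=True)
#   c = a + b
#   fn = c.grad_fn                              # AddBackward0
#   accs = [f for f, _ in fn.next_functions]    # two AccumulateGrad nodes
#   assert accs[0].variable is a and accs[1].variable is b
#
# In the distributed case verified above, the rpcSendBackward node roughly
# takes the place of c.grad_fn at the root of the send-side graph.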
# Run the same code locally and with dist autograd and verify gradients
# are the same.
def _backward_simple(self, dst, t1, t2, local_grads, sparse):
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func_with_dst(
dst, exec_mode, torch.add, t1, t2
)
if sparse:
loss = torch.sparse.sum(ret)
else:
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
# For a context passed down from previous nested chain calls, this rank
# receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
# the result tensor t3 back.
# For this context on this rank, it expects a graph like this:
#  send and recv functions:
#        rpcSendBackward
#              |
#        t3.AddBackward0
#         /          \
#  t1.recvRpcBackward  t2.recvRpcBackward
def _verify_graph_for_rpc_call_exec(self, send_function):
# Verify next function is AddBackward0
next_funcs = send_function.next_functions
self.assertEqual(1, len(next_funcs))
add_backward_fn = next_funcs[0][0]
self.assertEqual("AddBackward0", add_backward_fn.name())
# Verify the next two functions are the same recv backward function.
next_funcs = add_backward_fn.next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For a context passed down from previous nested chain calls, this rank
# receives two tensors t1 and t2 and forwards them via a nested rpc call
# to the next dst. On the return route, it receives the result tensor t3
# from the next dst and forwards t3 back to the previous caller.
# For this context on this rank, it expects a graph like this:
#  send and recv functions for receiving and forwarding t1 and t2:
#        rpcSendBackward
#         /          \
#  t1.recvRpcBackward  t2.recvRpcBackward
#  send and recv functions for receiving and forwarding t3:
#        rpcSendBackward
#              |
#      t3.recvRpcBackward
def _verify_graph_for_nested_rpc_call(self, ctx):
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
# For the send function created when making the nested rpc call,
# the next functions of the send function are the two recv functions
# for the two tensors received from the previous call.
next_funcs = list(send_functions.values())[0].next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For the send function created when returning the response to the previous call,
# the next function of the send function is the recv function
# for the result tensor returned from the nested call.
next_funcs = list(send_functions.values())[1].next_functions
self.assertEqual(1, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
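# Editor's note (illustrative sketch, not part of the original helpers): the
# backward helpers above all follow roughly the same pattern, e.g.:
#
#   with dist_autograd.context() as context_id:
#       out = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t2))
#       loss = torch.sparse.sum(out) if sparse else out.sum()
#       dist_autograd.backward(context_id, [loss])
#       grads = dist_autograd.get_gradients(context_id)
#
# and then compare `grads` against the gradients produced by running the same
# ops locally with torch.autograd.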
class TensorPipeAgentDistAutogradTest(CommonDistAutogradTest):
# Sparse tests only work with TensorPipeAgent.
@dist_init
def test_graph_for_builtin_call_sparse(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_python_call_sparse(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_builtin_remote_call_sparse(self):
self._test_graph(torch.add, ExecMode.REMOTE, True)
@dist_init
def test_graph_for_python_remote_call_sparse(self):
self._test_graph(my_py_add, ExecMode.REMOTE, True)
@dist_init
def test_graph_for_py_nested_call_sparse(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_py_nested_remote_call_sparse(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE, True)
@dist_init
def test_graph_for_py_nested_call_itself_sparse(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, True)
@dist_init
def test_graph_for_py_nested_remote_call_itself_sparse(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, True)
@dist_init
def test_no_graph_with_tensors_not_require_grad_sparse(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, True)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote_sparse(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, True)
@dist_init
def test_rpc_complex_args_sparse(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC, True)
@dist_init
def test_remote_complex_args_sparse(self):
self._test_rpc_complex_args(ExecMode.REMOTE, True)
@dist_init
def test_context_cleanup_tensor_with_grad_sparse(self):
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad_sparse(self):
t1 = build_sparse_tensor(requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_nested_rpc_sparse(self):
t1 = build_sparse_tensor(requires_grad=True)
t2 = build_sparse_tensor(requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_backward_no_grad_on_tensor_sparse(self):
self._backward_no_grad_on_tensor(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_backward_simple_sparse(self):
self._backward_simple(
self._next_rank(),
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_simple_self_sparse(self):
self._backward_simple(
self.rank,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_rref_multi_sparse(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_rref_sparse(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_rref_nested_sparse(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._backward_rref(
callee,
rref_owner,
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_trainer_ps_sparse(self):
self._test_trainer_ps(
build_sparse_tensor,
_run_trainer,
True
)
@dist_init
def test_backward_multiple_round_trips_sparse(self):
self._backward_multiple_round_trips(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
build_sparse_tensor(requires_grad=True),
None,
True
)
@dist_init
def test_backward_different_dtypes_sparse(self):
self._backward_different_dtypes(
build_sparse_tensor(requires_grad=True, dtype=torch.float32),
build_sparse_tensor(requires_grad=True, dtype=torch.float64),
True
)
@dist_init
def test_backward_simple_python_udf_sparse(self):
self._backward_simple_python_udf(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_backward_simple_script_call_sparse(self):
self._backward_simple_script_call(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_nested_backward_accumulate_grads_sparse(self):
self._nested_backward_accumulate_grads(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_backwards_nested_python_udf_sparse(self):
# Run equivalent of _nested_python_udf locally.
self._backwards_nested_python_udf(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_mixed_requires_grad_sparse(self):
self._mixed_requires_grad(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=False),
True
)
@dist_init
def test_multiple_backward_sparse(self):
self._multiple_backward(
build_sparse_tensor(requires_grad=True),
build_sparse_tensor(requires_grad=True),
True
)
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
dst = self._next_rank()
remote_embedding = rpc.remote(
worker_name(dst),
torch.nn.EmbeddingBag,
args=(16, 16),
kwargs={"mode": "sum", "sparse": True},
)
local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
# requires_grad = True to record send/recv functions
per_sample_weights = torch.rand((8), requires_grad=True)
offsets = torch.LongTensor([0, 4])
local_res = local_embedding(input, offsets, per_sample_weights)
# Run backward twice.
torch.autograd.backward([local_res.sum()], retain_graph=True)
torch.autograd.backward([local_res.sum()])
local_grad = local_embedding.weight.grad
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._call_remote_embedding,
args=(remote_embedding, input, offsets, per_sample_weights),
)
# Run backward twice to test accumulation of sparse gradients.
dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
dist_autograd.backward(context_id, [res.sum()])
remote_grad = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._get_grad,
args=(remote_embedding, context_id),
)
self.assertEqual(local_grad, remote_grad)
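# Editor's illustration (not part of the original tests): the sparse gradient
# accumulation exercised above can be reproduced locally, e.g.:
#
#   emb = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
#   inp = torch.LongTensor([1, 2, 4, 5])
#   offsets = torch.LongTensor([0, 2])
#   out = emb(inp, offsets)
#   torch.autograd.backward([out.sum()], retain_graph=True)
#   torch.autograd.backward([out.sum()])
#   # emb.weight.grad is a sparse tensor holding the sum of both passes.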
class DistAutogradTest(CommonDistAutogradTest):
@dist_init
def test_autograd_context(self):
# Verify max possible id.
max_auto_increment = 281474976710655
self.assertEqual(
max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
)
context_ids = []
for i in range(200):
with dist_autograd.context() as context_id:
self.assertEqual(
context_id,
dist_autograd._retrieve_context(context_id)._context_id(),
)
# First 16 bits should be worker_id.
self.assertEqual(self.worker_id, context_id >> 48)
context_ids.append(context_id)
for context_id in context_ids:
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd._retrieve_context(context_id)
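# Editor's illustration (not part of the original test): the context id layout
# checked above packs a 16-bit worker_id into the high bits of a 64-bit id,
# leaving 48 bits for the auto-incremented counter, e.g.:
#
#   worker_id = 3
#   local_counter = 42
#   context_id = (worker_id << 48) | local_counter
#   assert context_id >> 48 == worker_id
#   assert context_id & ((1 << 48) - 1) == local_counter
#   # 281474976710655 == (1 << 48) - 1, the max counter value asserted above.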
@dist_init
def test_nested_context(self):
with dist_autograd.context() as context_id:
# Nested contexts not supported.
with self.assertRaisesRegex(
RuntimeError, "Already have an autograd context id for this thread"
):
with dist_autograd.context() as context_id:
pass
@dist_init
def test_graph_for_builtin_call(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_python_call(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_builtin_remote_call(self):
self._test_graph(torch.add, ExecMode.REMOTE, False)
@dist_init
def test_graph_for_python_remote_call(self):
self._test_graph(my_py_add, ExecMode.REMOTE, False)
@dist_init
def test_graph_for_py_nested_call(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_py_nested_remote_call(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE, False)
@dist_init
def test_graph_for_py_nested_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, False)
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, False)
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, False)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, False)
def _test_grad_only_on_return_value(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), ret_requires_grad
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
dist_autograd.backward(context_id, [ret.sum()])
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
grads = dist_autograd.get_gradients(ctx_ids[1])
self.assertEqual(1, len(grads))
self.assertIn(requires_grad_tensor, grads)
self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
# due to the above get_gradients call, ensure that dist autograd
# contexts aren't cleaned up until all workers exit context managers
dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
self._test_grad_only_on_return_value(ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
self._test_grad_only_on_return_value(ExecMode.REMOTE)
@dist_init
def test_rpc_complex_args(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC, False)
@dist_init
def test_remote_complex_args(self):
self._test_rpc_complex_args(ExecMode.REMOTE, False)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
t1 = torch.ones(3, 3, requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_worker_ids_recorded(self):
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
# if no tensors require grad, we should still record worker_ids, as
# the autograd context ID is still passed to other workers.
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
# worker_ids should be recorded when tensors do require grad
t1.requires_grad = True
t2.requires_grad = True
for dst_rank in dst_ranks:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
@dist_init
def test_dist_autograd_profiling(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum()
with torch.autograd.profiler.profile() as p:
dist_autograd.backward(context_id, [loss])
function_events = p.function_events
def get_event(partial_key):
return [event for event in function_events if partial_key in event.name][0]
send_event = get_event("SendRpcBackward")
recv_event = get_event("RecvRpcBackward")
backward_event = get_event("torch::distributed::autograd::backward")
# There should be at least one send and one recv event, corresponding to the send/recv functions executed.
self.assertEqual(send_event.count, 1)
self.assertEqual(recv_event.count, 1)
# The CPU total for the backward event should be greater than send and recv, since
# applying those functions in the backwards pass is a subset of the entire backward pass.
self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
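# Editor's illustration (not part of the original test): the same profiler
# results can also be dumped as a human-readable table, e.g.:
#
#   with torch.autograd.profiler.profile() as p:
#       dist_autograd.backward(context_id, [loss])
#   print(p.key_averages().table(sort_by="cpu_time_total"))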
@dist_init
def test_error_in_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(6, 6, requires_grad=True)
with self.assertRaises(RuntimeError):
# This should throw an error since matrix sizes don't match.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
@dist_init
def test_backward_no_grad_on_tensor(self):
self._backward_no_grad_on_tensor(
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
False
)
@dist_init
def test_backward_simple(self):
self._backward_simple(
self._next_rank(),
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_simple_self(self):
self._backward_simple(
self.rank,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_rref(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_rref_multi(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_rref_nested(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._backward_rref(
callee,
rref_owner,
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_trainer_ps(self):
self._test_trainer_ps(
create_tensor,
_run_trainer,
False
)
@dist_init
def test_trainer_ps_torchscript_functions(self):
# TODO: needs more investigation.
# There is an RRef leak when shutting down; the suspicion is that the
# RRef passed as an argument crosses the pybind boundary and is not
# garbage collected by Python when shutdown() is called.
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript, False)
@dist_init
def test_backward_multiple_round_trips(self):
self._backward_multiple_round_trips(
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3)),
torch.rand((3, 3), requires_grad=True),
torch.rand((3, 3)),
torch.rand((3, 3), requires_grad=True),
None,
False
)
@dist_init
def test_backward_different_tensor_dims(self):
local_grads = None
t1 = torch.rand((4, 6), requires_grad=True)
t2 = torch.rand((6, 5))
t3 = torch.rand((5, 7), requires_grad=True)
t4 = torch.rand((7, 9))
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
val = self._exec_func(exec_mode, torch.matmul, t1, t2)
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
val = self._exec_func(
exec_mode,
torch.matmul,
torch.narrow(s, 0, 0, 1),
torch.narrow(s, 0, 2, 1),
)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
local_grads = None
t = torch.rand((10, 2), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
t1 = tensor_list[0]
t2 = tensor_list[2]
t3 = tensor_list[4]
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t
)
local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# We don't use the result of an RPC function, as a result the
# backward pass would hang in the "FAST" mode.
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
val = torch.mul(t1, t2)
# Run backward, this would hang forever.
dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
# Run the test in a thread which would never finish.
t = threading.Thread(
target=self._run_test_backward_unused_send_function_in_thread
)
t.daemon = True
t.start()
t.join(10) # Wait for 10s.
# Verify thread is still alive (indicating backward hasn't completed yet).
self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# Perform some ops before error simulation.
tmp = (t1 + t2) * (t1 + t2)
t3 = SimulateBackwardError.apply(tmp)
# Run multiple round trips across different nodes and verify the
# original node receives an error thrown on a node deep in the chain.
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t2, t3)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.mul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.div, args=(val, t2)
)
with self.assertRaisesRegex(
RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
):
# Run backwards, and validate we receive an error.
dist_autograd.backward(context_id, [val.sum()])
@dist_init(clean_shutdown=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure(self):
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
# Wait for all RPCs to be done.
dist.barrier()
# Kill all odd rank nodes.
if self.rank % 2 == 0:
shutdown_error_regex = self.get_shutdown_error_regex()
# Wait for all other nodes to die.
for rank in range(self.world_size):
if rank % 2 != 0:
wait_until_node_failure(rank, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex()
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since all
# other nodes are dead.
dist_autograd.backward(context_id, [res.sum()])
else:
# Exit all other nodes.
pass
@dist_init
def test_backward_without_context(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
dist_autograd.backward(context_id, [res.sum()])
@dist_init
def test_backward_without_rpc(self):
dst_rank = self.rank
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_backward_invalid_args(self):
with dist_autograd.context() as context_id:
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(context_id, None)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(None, None)
with self.assertRaisesRegex(
RuntimeError, "No tensors provided for gradient computation"
):
dist_autograd.backward(context_id, [])
with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
t = torch.rand(3, 3)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "is not a scalar, all roots need to be scalar"
):
t = torch.rand(3, 3, requires_grad=True)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "does not have a valid gradient function"
):
t = torch.rand(1, requires_grad=True)
dist_autograd.backward(context_id, [t])
@dist_init
def test_backward_multiple_roots(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum()
r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum()
r3 = self._exec_func(exec_mode, torch.cos, t1).sum()
r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_different_dtypes(self):
self._backward_different_dtypes(
torch.rand((3, 3), requires_grad=True, dtype=torch.float32),
torch.rand((3, 3), requires_grad=True, dtype=torch.float64),
False
)
@dist_init
def test_backward_simple_python_udf(self):
self._backward_simple_python_udf(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@dist_init
def test_backward_simple_script_call(self):
self._backward_simple_script_call(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.linalg.multi_dot([t1, t2, t3, t4, t5])
@dist_init
def test_backward_complex_python_udf(self):
# Run the same code locally and with dist autograd and verify gradients
# are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._complex_python_udf, t1, t2
)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.linalg.multi_dot([t1, t2, t3, t4])
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2),
)
return torch.linalg.multi_dot([t1, t2, res])
@dist_init
def test_backward_python_udf_error(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_rpc_call_backward_error,
args=(t1, t2, self._next_rank()),
)
with self.assertRaisesRegex(
RuntimeError, "Simulate error on backward pass"
):
dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
@dist_init(clean_shutdown=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure_python_udf(self):
# Set a short timeout to quickly time out failed RPCs.
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst = self._next_rank()
res = rpc.rpc_sync(
worker_name(dst),
my_py_nested_call,
args=(t1, t2, dst, self.world_size, 1),
)
dist.barrier()
# Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
if self.rank == 2:
return
store = dist.distributed_c10d._get_default_store()
if self.rank == 0:
# Wait for rank 2 to die.
shutdown_error_regex = self.get_shutdown_error_regex()
wait_until_node_failure(2, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex().
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since rank 2 is dead.
dist_autograd.backward(context_id, [res.sum()])
# Mark rank 0 as done in the store, since the RPC framework on
# some nodes might be broken at this point.
store.set('test_backward_node_failure_python_udf_rank0_done', "True")
else:
# Wait for backward to finish on rank 0.
store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10))
@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2
res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
return t1 * t2 * t3 * t4 * res
@dist_init
def test_backwards_nested_python_udf(self):
# Run equivalent of _nested_python_udf locally.
self._backwards_nested_python_udf(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
_test_clean_context_backward_context_id = None
class MyBackwardFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
assert DistAutogradTest._test_clean_context_backward_context_id is not None
# Release the context to simulate error (use barrier before releasing
# context to ensure all nodes execute the backward function).
dist.barrier()
dist_autograd._release_context(
DistAutogradTest._test_clean_context_backward_context_id
)
# Verify all contexts are cleaned up.
assert _all_contexts_cleaned_up()
return input
@dist_init
def test_clean_context_during_backward(self):
"""
This test simulates the situation where the 'backward' call might throw
an exception locally which would lead to the autograd context being
cleaned up if we're using the context manager. As a result, the autograd
context might be cleaned up while some threads are still using the
autograd context.
It is fine for the 'backward' call to throw an exception in this test,
but the process should not crash.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
context = dist_autograd._new_context()
context_id = context._context_id()
DistAutogradTest._test_clean_context_backward_context_id = context_id
# Send the context id to all nodes.
for i in range(0, self.world_size):
if i != self.rank:
rank_distance = (i - self.rank + self.world_size) % self.world_size
rpc.rpc_sync(
worker_name(i),
_set_rpc_done,
args=(context_id, rank_distance),
)
dist.barrier()
# Verify all context ids have been received.
self.assertEqual(self.world_size - 1, len(known_context_ids))
t1 = torch.rand((3, 3), requires_grad=True)
for i in range(0, 100):
dst = self._next_rank()
t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
# Call MyBackwardFunc as the first op of the backward pass to
# ensure we release the context early in the backward pass.
t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
self.assertEqual(100, len(context._send_functions()))
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd.backward(context_id, [t1.sum()])
# HACK: Killing workers since otherwise the autograd engine gets stuck on
# other nodes. The proper fix would be addressing:
# https://github.com/pytorch/pytorch/issues/27643, which would inform
# other nodes about the failure.
# The autograd engine gets stuck on other nodes since they're waiting to
# receive gradients from the node that received an error (and as a
# result it didn't execute the rest of the graph).
dist.barrier()
rpc.shutdown(graceful=False)
sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
embedding = embedding_rref.local_value()
return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
embedding = embedding_rref.local_value()
grad_map = dist_autograd.get_gradients(context_id)
return grad_map[embedding.weight]
@classmethod
def _mixed_requires_grad_operaton(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
@dist_init
def test_mixed_requires_grad(self):
self._mixed_requires_grad(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=False),
False
)
class TestDebugInfoFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
backward_passes = int(debug_info["num_current_backward_passes"])
# Hard to validate exact numbers because of the distributed nature.
# We can't use a barrier() here since that would block the single
# CPU thread available for autograd and can cause deadlocks.
assert backward_passes >= 1 and backward_passes <= 4
return input
@dist_init
def test_debug_info(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
i = 0
res = {}
res[i] = t1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
# Call custom function in middle of backward pass to ensure all
# nodes are still waiting on a backward().
res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
i += 1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
dist_autograd.backward(context_id, [res[i].sum()])
debug_info = dist_autograd._get_debug_info()
num_autograd_context = int(debug_info["num_autograd_contexts"])
# Need at least one context and not more than 4.
self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
for rd in range(self.world_size - 1):
rpc.rpc_sync(
worker_name((self.rank + rd + 1) % self.world_size),
_set_rpc_done,
args=(context_id, rd + 1),
)
dist.barrier()
# Validate information
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
# only have `num_current_backward_passes` and `num_autograd_contexts`
self.assertTrue(len(debug_info) == 2)
self.assertTrue(_all_contexts_cleaned_up())
# All contexts should be cleaned up.
debug_info = dist_autograd._get_debug_info()
self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
dist_autograd.backward(context_id, [t6.sum()])
@dist_init
def test_async_dist_autograd(self):
"""
This test ensures async processing for distributed autograd works
appropriately. This is achieved by spawning multiple threads and
hammering a single node with a lot of backward() calls.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank != 0:
# All other ranks schedule work on rank 0.
threads = []
for i in range(20):
t = threading.Thread(target=DistAutogradTest._workload_thread)
t.start()
threads.append(t)
for thread in threads:
thread.join()
dist.barrier()
@dist_init
def test_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = torch.matmul(t1, t2)
# Run backward twice.
torch.autograd.backward([t3.sum()], retain_graph=True)
torch.autograd.backward([t3.sum()])
t3 = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
# Run backward twice.
dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
dist_autograd.backward(context_id, [t3.sum()])
# Verify the gradients are the same for local and remote execution.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
return rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
@dist_init
def test_nested_backward_accumulate_grads(self):
self._nested_backward_accumulate_grads(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@dist_init
def test_multiple_backward(self):
self._multiple_backward(
torch.rand(3, 3, requires_grad=True),
torch.rand(3, 3, requires_grad=True),
False
)
@dist_init(clean_shutdown=False)
def test_multiple_backward_with_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
'worker{}'.format(self._next_rank()),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2)).sum()
try:
# Run backward in a loop multiple times.
for i in range(100):
if i < 50:
with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"):
dist_autograd.backward(context_id, [loss], retain_graph=True)
elif i > 50:
# Recovered from error.
dist_autograd.backward(context_id, [loss], retain_graph=True)
else:
dist.barrier()
SimulateBackwardError._simulate_error = False
dist.barrier()
finally:
# Sync before resetting flag.
dist.barrier()
# Reset the flag.
SimulateBackwardError._simulate_error = True
@dist_init
def test_backward_verify_hooks(self):
t1 = torch.ones((3, 3), requires_grad=True)
# Double the gradient.
t1.register_hook(lambda grad: grad * 2)
t2 = torch.ones((3, 3), requires_grad=True)
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, torch.matmul, t1, t2)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_no_grad_copy(self):
'''
Similar to test in test_autograd.py.
'''
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class MyFuncSingleGrad(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
return grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))])
grads = dist_autograd.get_gradients(context_id)
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for a
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFuncSingleGrad.static_grad_ptr
p_a = grads[a].data_ptr()
# Verify there was no clone.
self.assertTrue(p_a == p_g)
# Test case that should trigger copy for both of a,b. This is
# different in the distributed autograd case since we hold
# a reference to all grads in a vector until all accumulation is done.
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a].data_ptr()
p_b = grads[b].data_ptr()
# check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# both should be copied.
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
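# Editor's note (illustration, not part of the original test): a data_ptr()
# match between grads[a] and the static_grad_ptr saved in backward() means the
# engine handed the incoming gradient buffer straight through to the
# accumulated grad ("stealing" it) instead of cloning; a mismatch means a copy
# was made, which is what the distributed engine does when it must hold all
# grads until accumulation finishes.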
@dist_init
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return it as the grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for a.
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
# check a uses the same buffer
self.assertTrue(p_a == p_g)
# Run backwards multiple times.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
# Non-contiguous indices and values should trigger a copy.
with dist_autograd.context() as context_id:
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = NonContGradFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
p_b = grads[b]._values().data_ptr()
# check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to verify accumulation.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init
def test_grad_copy_sparse_indices_extra_ref(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
static_grad_indices_ref = None
static_grad_values_ref = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
# indices() and values() return views, so holding onto
# references to them does not increment the refcount of the indices
# and values inside the sparse tensor.
MyFunc.static_grad_indices_ref = grad._indices()
MyFunc.static_grad_values_ref = grad._values()
return grad
a = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
self.assertIsNotNone(MyFunc.static_grad_indices_ref)
self.assertIsNotNone(MyFunc.static_grad_values_ref)
# grad would be stolen, since static_grad_indices_ref and
# static_grad_values_ref are holding onto views and don't bump the
# refcount.
self.assertTrue(p_g == p_a)
@dist_init
def test_post_hooks(self):
self.hook_called_times = 0
def post_hook_add_one(output_grads, input_grads):
self.hook_called_times += 1
return output_grads
def post_hook_add_two(output_grads, input_grads):
self.hook_called_times += 2
return output_grads
t = torch.rand(10, 10, requires_grad=True)
a = t + t
# Register post hooks
accumulate_grad_0 = a.grad_fn.next_functions[0][0]
accumulate_grad_0.register_hook(post_hook_add_one)
accumulate_grad_0.register_hook(post_hook_add_two)
accumulate_grad_1 = a.grad_fn.next_functions[1][0]
accumulate_grad_1.register_hook(post_hook_add_two)
with dist_autograd.context() as context_id:
loss = a.sum()
dist_autograd.backward(context_id, [loss])
self.assertEqual(5, self.hook_called_times)
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads))
self.assertTrue(t in grads)
@staticmethod
def _slow_add(t1, t2):
time.sleep(1)
t3 = t1 + t2
t3.requires_grad = True
return t3
@dist_init
def test_thread_local_context_id(self):
t1 = torch.rand((3, 3))
t2 = torch.rand((3, 3))
t3 = t1 + t2
t3.requires_grad = True
t3.sum().backward()
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
with dist_autograd.context() as context_id:
loss = rref.to_here().sum()
# Due to the slow add, the continuation of this backward pass will be
# invoked by the previous rpc.remote thread, which does not have a
# valid context_id. This tests whether we propagate thread_local
# state properly when jumping across threads on the server side.
dist_autograd.backward(context_id, [loss])
self.assertTrue(
rpc.rpc_sync(
dst,
_compare_owner_value,
args=(context_id, rref, t3.grad)
)
)
class CudaDistAutogradTest(CommonDistAutogradTest):
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_simple(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
(t1 + t2).sum().backward()
with dist_autograd.context() as context_id:
t3 = t1 + t2
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
# Root is CPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5)
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t7.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation_gpu_root(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
# Root is on GPU in this variant.
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t6.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
class FaultyAgentDistAutogradTest(RpcAgentTestFixture):
# Reusing a simplified helper function from DistAutogradTest to ensure
# autograd context is successfully cleaned up even when RPCs are failing.
def context_cleanup_test_helper(self, rpc_args, func):
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Test that in dist autograd, when the tensors communicated over RPC do
# NOT require grad, we still clean up the dist autograd contexts created
# on other nodes. This is because the autograd context is still
# communicated over RPC even if the tensor arguments do not require grad,
# as it is possible that the response could require grad.
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
class WrapperModule(nn.Module):
def __init__(self, model, device):
super().__init__()
self.model = model.to(device)
def forward(self, *args):
return self.model(*args)
def gradients(self, ctx_id):
grads = dist_autograd.get_gradients(ctx_id)
return [grads[p] for p in self.model.parameters()]
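# Editor's note (illustrative sketch, not part of the original code; worker
# name and layer sizes are hypothetical): WrapperModule is meant to be
# instantiated behind an RRef and queried for gradients by dist autograd
# context id, roughly:
#
#   remote_layer = rpc.remote("worker1", WrapperModule, args=(nn.Linear(4, 4), "cpu"))
#   with dist_autograd.context() as ctx_id:
#       out = remote_layer.rpc_sync().forward(torch.randn(2, 4))
#       dist_autograd.backward(ctx_id, [out.sum()])
#       grads = remote_layer.rpc_sync().gradients(ctx_id)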
class TensorPipeCudaDistAutogradTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(4)
def test_device_maps_backward_pass(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t1 = torch.rand(10, device=self.rank, requires_grad=True)
t2 = torch.rand(10, device=self.rank, requires_grad=True)
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(dst, torch.add, args=(t1, t2))
dist_autograd.backward(context_id, [res.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(torch.ones(10), grads[t1])
self.assertEqual(torch.ones(10), grads[t2])
self.assertEqual(t1.device, grads[t1].device)
self.assertEqual(t2.device, grads[t2].device)
rpc.shutdown()
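# Editor's note (illustration, not part of the original test; the worker name
# is hypothetical): set_device_map declares where tensors land on the callee,
# and the inverse map is applied to gradients on the way back, e.g.:
#
#   options.set_device_map("worker1", {0: 1})
#   # forward RPC:  caller cuda:0 -> callee cuda:1
#   # backward RPC: callee cuda:1 -> caller cuda:0 (inverse map)
#
# which is why the gradients above come back on the caller's original devices.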
class MyRemoteCompute(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
input = input * 2.0
return input
class MyLocalCompute(torch.nn.Module):
def __init__(self, next_stage):
super().__init__()
self.next_stage = next_stage
def forward(self, input):
return self.next_stage.rpc_sync().forward(input)
@skip_if_lt_x_gpu(4)
def test_dist_autograd_sync_streams(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute)
local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute)
for _ in range(10):
input = torch.rand([1000, 10000], device=self.rank, requires_grad=True)
# Run local autograd
result = input * 2.0
r = random.random()
loss = result.sum() * r
loss.backward()
# Run distributed autograd
with dist_autograd.context() as context_id:
result = local_compute(input)
loss = result.sum() * r
dist_autograd.backward(context_id, [loss])
# Compare grads.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(input.grad, grads[input])
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_gradients_synchronizations(self):
options = self.rpc_backend_options
for peer_rank in range(self.world_size):
options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# this is master
layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)]
local_layers = [l.to(0) for l in layers]
remote_layers = []
for rank in range(1, self.world_size):
remote_layers.append(rpc.remote(
worker_name(rank),
WrapperModule,
args=(layers[rank - 1], rank)
))
x = torch.randn(5000, 2000).to(0)
# local iteration
local_model = nn.Sequential(*local_layers)
local_model(x).sum().backward()
# remote iteration
with dist_autograd.context() as context_id:
for remote_layer in remote_layers:
x = remote_layer.rpc_sync().forward(x)
dist_autograd.backward(context_id, [x.sum()])
futs = []
for remote_layer in remote_layers:
futs.append(remote_layer.rpc_async().gradients(context_id))
for i in range(len(futs)):
local_gradients = [p.grad for p in local_layers[i].parameters()]
for g1, g2 in zip(futs[i].wait(), local_gradients):
self.assertEqual(g1, g2)
rpc.shutdown()
| pytorch-master | torch/testing/_internal/distributed/rpc/dist_autograd_test.py |
import os
from abc import ABC, abstractmethod
import torch.testing._internal.dist_utils
class RpcAgentTestFixture(ABC):
@property
def world_size(self) -> int:
return 4
@property
def init_method(self):
use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
if use_tcp_init == "1":
master_addr = os.environ["MASTER_ADDR"]
master_port = os.environ["MASTER_PORT"]
return f"tcp://{master_addr}:{master_port}"
else:
return self.file_init_method
@property
def file_init_method(self):
return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format(
file_name=self.file_name
)
@property
@abstractmethod
def rpc_backend(self):
pass
@property
@abstractmethod
def rpc_backend_options(self):
pass
def setup_fault_injection(self, faulty_messages, messages_to_delay):
"""Method used by dist_init to prepare the faulty agent.
Does nothing for other agents.
"""
pass
# Shutdown sequence is not well defined, so we may see any of the following
# errors when running tests that simulate errors via a shutdown on the
# remote end.
@abstractmethod
def get_shutdown_error_regex(self):
"""
Return the various error messages we may see from RPC agents while running
tests that check for failures. This function is used to match against
possible errors to ensure failures were raised properly.
"""
pass
@abstractmethod
def get_timeout_error_regex(self):
"""
Returns a partial string indicating the error we should receive when an
RPC has timed out. Useful for use with assertRaisesRegex() to ensure we
have the right errors during timeout.
"""
pass
| pytorch-master | torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py |
import torch.distributed.rpc as rpc
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import (
tp_transports,
)
class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture):
@property
def rpc_backend(self):
return rpc.backend_registry.BackendType[
"TENSORPIPE"
]
@property
def rpc_backend_options(self):
return rpc.backend_registry.construct_rpc_backend_options(
self.rpc_backend,
init_method=self.init_method,
_transports=tp_transports()
)
def get_shutdown_error_regex(self):
# FIXME Once we consolidate the error messages returned by the
# TensorPipe agent, put a more specific regex here.
error_regexes = [".*"]
return "|".join(["({})".format(error_str) for error_str in error_regexes])
def get_timeout_error_regex(self):
return "RPC ran for more than"
| pytorch-master | torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py |
import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info, WorkerInfo
from torch.distributed.rpc.api import _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
tp_transports,
)
from torch.testing._internal.common_utils import (
IS_MACOS,
load_tests,
sandcastle_skip_if,
get_cycles_per_ms,
)
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
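# Runs a small chain of tensor ops (ones/add/mul/relu/sigmoid), optionally on a
# given CUDA device and under a record_function block, so that the profiler
# tests can assert on the events it produces (see EXPECTED_REMOTE_EVENTS below).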
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to be run as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::clamp_min",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
FIFTY_MIL_CYCLES = 50000000
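# Module-level counter that the rpc barrier tests increment over RPC on the
# leader worker and then check after the barrier.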
_rpc_barrier_count = 0
def _increment_count():
global _rpc_barrier_count
_rpc_barrier_count += 1
def _reset_count():
global _rpc_barrier_count
_rpc_barrier_count = 0
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# Used to test sending Python user-defined functions over RPC.
# The classes and functions below are used to test Python user-defined classes
# and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
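# Helper to invoke a bound method on the object held by an RRef; it is meant to
# run on the RRef's owner, since it uses rref.local_value().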
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_sparse_tensor(coalesce=False):
i = [[0, 1, 1], [2, 0, 2]]
v = [3, 4, 5]
tensor = torch.sparse_coo_tensor(i, v, (2, 3))
if coalesce:
tensor = tensor.coalesce()
return tensor
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_container_sum(a):
result = a[0]
for tensor in a[1:]:
result += tensor
return result
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def identity(a):
return a
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def nested_rpc_sparse(dst):
return rpc.rpc_sync(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
)
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# This function returns immediately without blocking the callee, but it will
# generate additional nested RPC requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_rref_sparse(dst):
return (
rpc.remote(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
),
rpc.remote(
dst,
torch.add,
args=(build_sparse_tensor(), build_sparse_tensor())
),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def nested_remote_sparse(dst):
rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor()))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
def heavy_rpc_sparse(tensor):
for i in range(1, 100):
tensor *= i
tensor = tensor / (i + 1)
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
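# Fans out `num` RPCs and manually sums their results into a single Future that
# completes once all of them have finished (a hand-rolled stand-in for
# torch.futures.collect_all, as noted in the TODO below).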
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
device = t.device
original_stream = torch.cuda.current_stream(device)
new_stream = torch.cuda.Stream(device)
new_stream.wait_stream(original_stream)
with torch.cuda.stream(new_stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
t.fill_(1)
fut = Future(devices=[device])
fut.set_result(t)
return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
def cb(fut):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
return fut.value() + z
return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
__slots__ = ("tensor", "lock", "event", "thread")
def __init__(self, t):
self.tensor = t
# Add one non-picklable field, to ensure it's ignored/skipped.
self.lock = Lock()
self.event = torch.cuda.Event(enable_timing=True)
self.thread = threading.Thread()
self.thread.start()
def increase(self, v):
with self.lock:
self.tensor += v
def sum(self):
with self.lock:
self.event.record()
return self.tensor.sum()
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings.
load_tests = load_tests
class MyEmbeddingBagModel(torch.nn.Module):
def __init__(self, sparse):
super().__init__()
self.eb = torch.nn.EmbeddingBag(
10,
10,
sparse=sparse
)
def forward(self, x):
return self.eb(x)
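# A toy parameter server used by the parameter-server tests: each trainer
# reports its gradient via the async_execution `average` method, and every
# caller's Future is fulfilled with the averaged gradient once all trainers
# for the current iteration have reported.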
class MyParameterServer:
def __init__(self, trainers):
self.lock = Lock()
self.trainers = trainers
self.iteration = 0
self.updates = 0
self.futures = []
self.total = None
self.gradient = None
@staticmethod
def get_gradient(rref):
return rref.local_value().gradient
@staticmethod
@rpc.functions.async_execution
def average(rref, riteration, tensor):
self = rref.local_value()
fut = torch.futures.Future()
with self.lock:
if riteration > self.iteration:
self.iteration = riteration
self.updates = 0
self.futures.clear()
self.futures.append(fut)
if self.total is None:
self.total = tensor
else:
self.total += tensor
self.updates += 1
if self.trainers == self.updates:
self.gradient = self.total / float(self.trainers)
for fut in self.futures:
result = self.total / float(self.trainers)
fut.set_result(result)
return fut
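# Small CNN used by the CUDA RPC tests; its forward intentionally sleeps on the
# current CUDA stream to help surface stream-synchronization bugs, and
# __getstate__ returns an empty dict so its parameters are not serialized over
# RPC.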
class MyConvNetForMNIST(nn.Module):
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
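# Mixin with helper methods shared by the RPC test suites; both the dense and
# the sparse-tensor variants reuse these helpers via the `sparse` flags.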
class RpcTestCommon:
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _self_py_udf_remote(self, worker_info, x, y, z):
rref = rpc.remote(worker_info, my_function, args=(x, y, z))
self.assertEqual(rref.to_here(), x + y + z)
def _self_remote_rref_as_rpc_arg(self, dst, x, y, z):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y))
self.assertEqual(ret, x + y + z + x + y)
self.assertEqual(fut.wait(), x + y + z + x)
def _self_remote_rref_as_remote_arg(self, dst, x, y, z):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x))
self.assertEqual(
ret_rref.to_here(), x + y + z + x
)
def _world_size_one(self, a, b):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
def _rpc_sync(x, y):
expect = x * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(x, y)
)
self.assertEqual(expect, result)
def _rpc_async(x, y):
expect = x * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(x, y)
).wait()
self.assertEqual(expect, result)
def _remote(x, y):
expect = x * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(x, y)
).to_here()
self.assertEqual(expect, result)
_rpc_sync(a, b)
_rpc_async(a, b)
_remote(a, b)
rpc.shutdown()
def _multi_rpc(self, sparse):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
if sparse:
x = build_sparse_tensor() * n
y = build_sparse_tensor() * n
else:
x = torch.ones(2, 2)
y = torch.ones(2, 2)
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(x, y),
)
self.assertEqual(ret, x * 2)
def _run_uneven_workload(self, f, x, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, f, args=(x,))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def _wait_all_workers(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 call this immediately and have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def _wait_all_workers_twice(self, f, x):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload(f, x)
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 call this immediately and have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def _nested_rpc(self, f, expected):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
f,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, expected)
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
def _builtin_remote_ret(self, x, y, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.to_here(), expected)
def _builtin_remote_self(self, x, y, expected):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(x, y),
)
self.assertEqual(rref.local_value(), expected)
def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n, sparse),
kwargs=kwargs_fn(n, sparse),
)
)
expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
def _py_rref_args(self, a, b, x, y, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(a, b)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(x, y)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
def _py_rref_args_user_share(self, a, b, c, x, y, z, expected):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(a, b, c)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(x, y, z)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), expected)
def _py_rpc_rref_args(self, a, b, c, x, y, z, expected):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(a, b, c)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(x, y, z)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, expected)
def _nested_remote(self, f, expected):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), expected)
def _nested_rref(self, f, expected1, expected2):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
def _nested_rref_stress(self, f, expected1, expected2):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
f,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), expected1)
self.assertEqual(rrefs[1].to_here(), expected2)
def _trainer_func(self, rref, sparse):
m = MyEmbeddingBagModel(sparse=sparse)
loss_fn = nn.MSELoss()
for i in range(10):
outputs = m(torch.rand(10, 10).long())
loss_fn(outputs, torch.rand(10, 10)).backward()
gradient = list(m.parameters())[0].grad
fut = rref.rpc_async().average(rref, i, gradient)
gradient = fut.wait()
if gradient.is_sparse:
gradient = gradient.to_dense().double()
ps_gradient = rref.rpc_sync().get_gradient(rref)
if ps_gradient.is_sparse:
ps_gradient = ps_gradient.to_dense().double()
self.assertTrue(torch.equal(gradient, ps_gradient))
def _my_parameter_server(self, sparse):
ps_rref = RRef(MyParameterServer(self.world_size - 1))
futures = []
for index in range(1, self.world_size):
futures.append(
rpc.rpc_async(
worker_name((self.rank + index) % self.world_size),
self._trainer_func,
args=(
ps_rref,
sparse
),
)
)
torch.futures.wait_all(futures)
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
# We check proper CUDA stream synchronization by adding to the tensor
# in one stream to get the expected value, and reading it from another stream.
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
if sparse_tensor:
tensor = build_sparse_tensor().to("cuda:0")
add_tensor = build_sparse_tensor().to("cuda:0")
expected_tensor = (tensor + add_tensor).coalesce()
else:
tensor = torch.zeros((100,), device="cuda:0")
add_tensor = torch.ones((100,), device="cuda:0")
expected_tensor = tensor + add_tensor
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor += add_tensor
if sparse_tensor:
tensor = tensor.coalesce()
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
tensor = unwrapper(future.wait())
if sparse_tensor:
self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
self.assertEqual(tensor.size(), expected_tensor.size())
else:
self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
class RpcTest(RpcAgentTestFixture, RpcTestCommon):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "could not find destination"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
# Test dense tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist().wait()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
rpc.init_rpc(
name=worker_name(self.rank % (self.world_size - 1)),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_pg_init_no_rpc_init(self):
dist.init_process_group(
backend='gloo',
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size)
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(3, 4)
def forward(self, x):
return self.lin(x)
model = MyModel()
model.train()
model = torch.nn.parallel.DistributedDataParallel(model)
with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'):
params = []
for param in model.parameters():
params.append(RRef(param))
def test_world_size_one(self):
self._world_size_one(
torch.ones(2, 2),
torch.ones(2, 2)
)
@dist_init(setup_rpc=False)
def test_invalid_names(self):
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
# Test that WorkerInfo can be pickled and sent in RPC call
@dist_init
def test_worker_info_pickle(self):
dst_rank = (self.rank + 1) % self.world_size
worker_info = rpc.api.get_worker_info()
ret = rpc.rpc_sync(worker_name(dst_rank), identity, args=(worker_info,))
self.assertEqual(ret, worker_info)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@staticmethod
def return_callee_id():
return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
worker_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
self._multi_rpc(False)
@dist_init
def test_future_wait_twice(self):
dst = worker_name((self.rank + 1) % self.world_size)
futs = []
for i in range(20):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
torch.futures.wait_all(futs)
for fut in futs:
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
@dist_init(setup_rpc=False)
def test_wait_all_workers_timeout(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
og_func = rpc.api._wait_all_workers
def wait_all_workers_sleep(timeout):
try:
rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout)
except RuntimeError as ex:
raise ex
rpc.api._wait_all_workers = wait_all_workers_sleep
try:
with self.assertRaisesRegex(RuntimeError, ''):
rpc.shutdown(graceful=True, timeout=0.01)
finally:
rpc.api._wait_all_workers = og_func
dist.barrier()
def test_wait_all_workers_dense(self):
self._wait_all_workers(heavy_rpc, torch.ones(100, 100))
def test_wait_all_workers_twice_dense(self):
self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100))
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
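# Helper for the rpc barrier tests below: the leader's counter is reset, every
# participating worker increments it on the leader between two barriers, and
# the leader then checks that the count matches the number of participants.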
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
# This test validates the implementation of barrier when multiple threads call into it
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload(heavy_rpc, torch.ones(100, 100))
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with _profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with _profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# If cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicating that no memory usage was profiled).
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with _profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
# prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
# Note: we don't assert that every remote event needs to be
# in the above set, the set is just a representative set of
# what we expect to see. The profiler can change and add more
# events, but we should always expect to see this representative
# set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
# The set should be empty, otherwise its contained elements did
# not show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
# Validate the order in which remote events show up in the profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(
set(remote_events_list),
set(EXPECTED_REMOTE_EVENTS),
f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with _profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
            # cpu_children only returns direct children, so here we gather all
            # descendants recursively. Copy the list first so that we do not
            # mutate the profiler event's own children while iterating over it.
            def get_cpu_children(event):
                if not event.cpu_children:
                    return []
                cpu_children = list(event.cpu_children)
                for e in event.cpu_children:
                    cpu_children.extend(get_cpu_children(e))
                return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with _profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipped order of ctx managers results in events being
# recorded as expected.
with _profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
def _profiler_test_with_rpc(
self, rpc_exec_mode, func, args, use_record_function=False, dst=None, kineto_profile=False
):
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
p = _profile if not kineto_profile else torch.profiler.profile # kineto
if self.rank == 1:
with p() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
if kineto_profile:
# Ensure multiple async RPCs don't cause issues.
# Would have raised
# "RuntimeError: Cannot call
# RemoteProfilerManager::setCurrentKey when current
# key is already set." error if RPC profiling was
# not disabled properly for kineto.
fut2 = rpc.rpc_async(worker_name(dst), func, args=args)
fut2.wait()
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events if not kineto_profile else prof.events()
if kineto_profile:
# RPC profiling is disabled so there should be no rpc related
# events.
with self.assertRaises(IndexError):
get_function_event(events, rpc_exec_mode.value)
return
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
                # Verify ordering by ensuring that the outer record_function
                # scope comes before the rpc event.
                foo_event_idx = next(i for i, event in enumerate(events) if "foo" in event.name)
                rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
                self.assertLess(foo_event_idx, rpc_event_idx)
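    # Illustrative sketch, not part of the original test suite: the minimal
    # pattern the record_function checks above rely on -- issuing an RPC inside
    # a profiler.record_function scope so the rpc event is nested within the
    # scope's CPU interval. Assumes an RPC agent is already initialized and
    # that a peer named "worker1" exists; both are assumptions made purely for
    # illustration.
    @staticmethod
    def _sketch_profile_rpc_in_scope():
        import torch
        import torch.distributed.rpc as rpc
        from torch.autograd import profiler

        with profiler.profile() as prof:
            # "outer_scope" is an arbitrary label for the user scope.
            with profiler.record_function("outer_scope"):
                fut = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 1))
                fut.wait()
        # Events recorded under the scope fall inside its time range, which is
        # what the assertions above check for the rpc event.
        return prof.function_events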
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
# Test to ensure that kineto profiler enabled in RPC does not enable
# RPC profiling (it is unsupported) and does not result in issues.
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_sleep_func, args=(1,), kineto_profile=True
)
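    # Illustrative sketch, not part of the original test suite: using the
    # kineto-based torch.profiler.profile around an RPC, as exercised with
    # kineto_profile=True above. RPC-specific profiling is not supported under
    # kineto, so only local ops are expected in the trace. The peer name
    # "worker1" is an assumption for illustration.
    @staticmethod
    def _sketch_kineto_profile_around_rpc():
        import torch
        import torch.distributed.rpc as rpc

        with torch.profiler.profile() as prof:
            rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 1))
        # prof.events() holds local CPU events only; no rpc_* events appear.
        return [evt.name for evt in prof.events()]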
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
expected_inner_events = ['aten::sub']
expected_outer_events = expected_inner_events + ['aten::add']
self._assert_top_level_events(inner_events, expected_inner_events)
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, expected_outer_events)
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
            # Validate that calling _call_end_callbacks_on_future twice
            # results in an error.
with _profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_double_end_callbacks_new_signatures(self):
# Test the new _record_function ops work
# Note: Remove once record_function uses these directly
num_sleep_seconds = 1
if self.rank == 1:
with _profile() as pf:
try:
record = torch.ops.profiler._record_function_enter_new("foo", None)
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)
finally:
torch.ops.profiler._record_function_exit(record)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with _profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
            # This barrier prevents a race condition where the main thread exits
            # the context manager before the remote function has finished running.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
self._builtin_remote_ret(
torch.ones(2, 2),
torch.ones(2, 2),
torch.ones(2, 2) * 2
)
@dist_init
def test_builtin_remote_self(self):
self._builtin_remote_self(
torch.ones(2, 2),
torch.ones(2, 2),
torch.ones(2, 2) * 2
)
@staticmethod
def _multi_args_fn(n, sparse=False):
if sparse:
return (build_sparse_tensor(), build_sparse_tensor())
else:
return (torch.ones(n, n), torch.ones(n, n))
@dist_init
def test_multi_builtin_remote_ret(self):
self._test_multi_remote_call(
torch.add, False,
args_fn=RpcTest._multi_args_fn
)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@staticmethod
def _multi_kwargs_fn(n, sparse=False):
if sparse:
return {
"a": build_sparse_tensor(),
"b": build_sparse_tensor(),
"c": build_sparse_tensor()
}
else:
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
@dist_init
def test_multi_py_udf_remote(self):
self._test_multi_remote_call(
my_function,
False,
kwargs_fn=RpcTest._multi_kwargs_fn
)
@dist_init
def test_py_rref_args(self):
self._py_rref_args(
torch.ones(2, 2),
1,
torch.ones(2, 2),
2,
torch.ones(2, 2) * 2 + 3)
@dist_init
def test_py_rref_args_user_share(self):
self._py_rref_args_user_share(
torch.ones(2, 2),
1,
2,
torch.ones(2, 2),
3,
4,
torch.ones(2, 2) * 2 + 10
)
@dist_init
def test_py_rpc_rref_args(self):
self._py_rpc_rref_args(
torch.ones(2, 2),
1,
2,
torch.ones(2, 2),
3,
4,
torch.ones(2, 2) * 2 + 10
)
@dist_init
def test_nested_remote(self):
self._nested_remote(
nested_remote,
torch.ones(2, 2) + 3
)
@dist_init
def test_nested_rref(self):
self._nested_rref(
nested_rref,
torch.ones(2, 2) + 1,
torch.ones(2, 2) + 2
)
@dist_init
def test_nested_rref_stress(self):
self._nested_rref_stress(
nested_rref,
torch.ones(2, 2) + 1,
torch.ones(2, 2) + 2
)
@dist_init
def test_multi_layer_nested_async_rpc(self):
        # This test will exit right away, but there will be a chain of async
        # RPCs. The termination algorithm should detect those messages properly.
        # Otherwise, some peers could exit early, leaving others to hit timeout
        # or connection-closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
        # Ensure that an error is raised if a user tries to call
        # local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
@dist_init
def _test_rref_type(self, blocking):
def launched_rpc(events):
expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
return any([e.name.startswith(expected_name) for e in events])
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with _profile() as p:
t = rref._get_type(blocking=blocking)
if not blocking:
t = t.wait()
self.assertTrue(launched_rpc(p.function_events))
expected_type = type(torch.ones(2))
self.assertEqual(t, expected_type)
futs = []
def verify(fut):
self.assertEqual(fut.value(), expected_type)
with _profile() as p:
for _ in range(10):
t = rref._get_type(blocking=blocking)
if not blocking:
futs.append(t)
t.add_done_callback(verify)
t = t.wait()
self.assertEqual(t, expected_type)
if not blocking:
# Note that cached calls with blocking=False all return the same
# cached original future.
first_fut = futs[0]
for f in futs[1:]:
self.assertTrue(f is first_fut)
# Ensure we never launch another RPC, other than for the very
# first call.
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
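    # Illustrative sketch, not part of the original test suite: the cached,
    # non-blocking type lookup described in _test_rref_type above. _get_type is
    # a private RRef API exercised by these tests; the peer name "worker1" is
    # an assumption for illustration.
    @staticmethod
    def _sketch_rref_type_caching():
        import torch
        import torch.distributed.rpc as rpc

        rref = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
        # The first call launches an RPC to the owner and caches the result.
        t = rref._get_type(blocking=True)
        # Later non-blocking calls are served from the cache, so no further
        # RPC is launched and the returned future is already completed.
        fut = rref._get_type(blocking=False)
        assert fut.wait() is t
        return t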
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
        # Create an RRef whose creation raises an error on the owner.
        rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def test_rref_type_with_error_blocking(self):
self._test_rref_type_with_error(blocking=True)
def test_rref_type_with_error_non_blocking(self):
self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_owner_blocking(self):
self._test_rref_type_owner(blocking=True)
def test_rref_type_owner_non_blocking(self):
self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
Tests that it is possible to call an instance method on a remote objet
by using rref.owner() as destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
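    # Illustrative sketch, not part of the original test suite: the same
    # mutate-then-read round trip as test_call_method_on_rref, written with the
    # RRef proxy helpers (rpc_sync() / rpc_async() / remote()) instead of the
    # _call_method_on_rref indirection. The peer name "worker1" is an
    # assumption for illustration.
    @staticmethod
    def _sketch_rref_proxy_method_calls():
        import torch.distributed.rpc as rpc

        rref = rpc.remote("worker1", MyClass, args=(10,))
        rref.rpc_sync().increment_value(2)
        rref.rpc_async().increment_value(5).wait()
        rref.remote().increment_value(7).to_here()
        # 10 + 2 + 5 + 7 == 24
        return rref.rpc_sync().get_value()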
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
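    # Illustrative sketch, not part of the original test suite: why the patch
    # target above is torch.distributed.rpc.api. mock.patch must target the
    # object through which the name is looked up at call time, not where the
    # name is originally defined. The class and method names below are
    # hypothetical and exist only to illustrate that rule.
    @staticmethod
    def _sketch_patch_where_used():
        from unittest import mock

        class Api:
            @staticmethod
            def cleanup():
                return "real"

            @classmethod
            def shutdown(cls):
                # Looked up through the class attribute at call time, so a
                # patch applied to Api.cleanup is what takes effect here.
                return cls.cleanup()

        with mock.patch.object(Api, "cleanup", return_value="patched"):
            assert Api.shutdown() == "patched"
        assert Api.shutdown() == "real"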
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on remote end
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
# This test checks local states that are modified by remote workers.
# This means that we would need barrier before and after every check.
# The barrier before the check makes sure that all previous states are
# cleared globally, the barrier after ensures that no following states
# change gets into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
# RRef on local value is not added to context until shared across RPC
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Wait for owner rref to be cleared.
while int(info["num_owner_rrefs"]) != 0:
info = _rref_context_get_debug_info()
time.sleep(0.1)
dist.barrier()
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
        # Test that rpc.enable_gil_profiling(False) results in GIL wait time
        # not being recorded. GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
# test that we can start RPC and then immediately locally shutdown
# without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
# only test keys in this test case. Values should be covered by
# individual module debug info tests
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in python 3.6+. So here, we
# manually check keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
# others have been brought up.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
# Shutdown sequence is not very well defined and as a result
# we can see any of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can timeout since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
# futures should time out and be marked with an exception indicating it as such.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# ensure that if a new timeout is set old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a longstanding RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
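    # Illustrative sketch, not part of the original test suite: the
    # default-timeout knobs used above. Only RPCs issued after
    # _set_rpc_timeout pick up the new default; in-flight futures keep the
    # timeout they were created with, and a value of 0 means "never time out".
    # Assumes an RPC agent is already initialized.
    @staticmethod
    def _sketch_default_rpc_timeout():
        import torch.distributed.rpc as rpc

        original = rpc.get_rpc_timeout()
        try:
            rpc._set_rpc_timeout(30)  # seconds
            return rpc.get_rpc_timeout()
        finally:
            # Restore the previous default so other calls are unaffected.
            rpc._set_rpc_timeout(original)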
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
timed_out_rpc_event = None
@staticmethod
def timed_out_rpc():
RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
# test that if a function does not exist on a callee, we don't crash,
# instead we get an AttributeError indicating that the func does not exist.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
            # Use delattr to remove the binding of a func on this node.
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
RuntimeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# This is to make Python not garbage collect a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        # There is no guarantee that the add_done_callback fn executes before
        # the test finishes, so wait on a 'then' callback registered afterwards
        # to guarantee the first callback has run.
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
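    # Illustrative sketch, not part of the original test suite: the distinction
    # the comment in test_add_done_callback relies on. Future.then returns a
    # new future that completes only after its callback runs, while
    # add_done_callback returns nothing to wait on, so synchronization has to
    # go through a 'then' future registered afterwards.
    @staticmethod
    def _sketch_then_vs_add_done_callback():
        import torch

        fut = torch.futures.Future()
        seen = []
        fut.add_done_callback(lambda f: seen.append(f.wait()))
        chained = fut.then(lambda f: f.wait() + 1)
        fut.set_result(41)
        # Waiting on the chained future also guarantees the earlier
        # add_done_callback has run, since callbacks fire in registration order.
        assert chained.wait() == 42
        return seen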
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
def _test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
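# Exercises all three RRef proxy flavors (rpc_sync, rpc_async, remote) against the
# static, class, and bound async methods; each proxied call is expected to yield
# 4 * x here, so the three accumulated calls give the 3 * 4 * x asserted below.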
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
self._test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Use a different file name for the next initialization
new_backend_options = self.rpc_backend_options
new_backend_options.init_method += "init_2"
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=new_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
if self.rank != 0:
og_func = rpc.api._broadcast_to_followers
og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
# Monkey-patch _broadcast_to_followers to fail, which would ensure
# _all_gather on leader raises an exception.
def raise_error(sequence_id, objects_map):
og_func(sequence_id, objects_map)
raise RuntimeError('simulation')
# Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
# which would ensure barrier is not called on followers.
def rref_error():
raise RuntimeError('simulation rref')
try:
rpc.api._broadcast_to_followers = raise_error
rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
rpc.shutdown()
finally:
rpc.api._broadcast_to_followers = og_func
rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
else:
with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
rpc.shutdown()
dist.barrier()
@dist_init
def test_my_parameter_server(self):
self._my_parameter_server(False)
class CudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with _profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
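# Skip events recorded on the local node (this test runs on rank 1); only events
# from the two remote CUDA workers are checked below.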
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class TensorPipeAgentRpcTest(RpcAgentTestFixture, RpcTestCommon):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method,
_transports=tp_transports()
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS,
_transports=tp_transports(),
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
# Set a high timeout since it doesn't affect test runtime and ensures
# the test doesn't erroneously time out due to slow machines.
timeout = 100
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
_transports=tp_transports(),
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
# Test where we try to get the type of an RRef from its owner, but RRef
# creation is slower than the timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote has finished creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
dst = worker_name((self.rank + 1) % self.world_size)
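# Calling an overloaded torch operator with no arguments should fail schema
# matching on the callee and surface as a RuntimeError here.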
with self.assertRaisesRegex(
RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema"
):
rpc.rpc_sync(dst, torch.add, args=())
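# Exercises timeouts for all three RRef proxy APIs (rpc_sync, rpc_async, remote):
# first against a slow method on an already-created RRef, then against an RRef
# whose owner-side creation is itself slow.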
def _test_rref_proxy_timeout(self, rref_proxy_api):
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
# Note that even when we call rref.rpc_async() in this case, we time out
# during future creation rather than while waiting on the future. This is
# because the rref proxy function calls rref._get_type before returning the
# future, which blocks until the RRef is created on the owner node or the
# specified timeout elapses.
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
# rpc_async returns immediately and surfaces a timeout through wait()
if rref_api == slow_rref.rpc_async:
result.wait()
# FIXME We wait until the remote has finished creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
@dist_init
def test_send_to_rank_sparse(self):
dst_rank = (self.rank + 1) % self.world_size
# Test sparse tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor()
y = build_sparse_tensor()
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor(coalesce=True)
y = build_sparse_tensor(coalesce=True)
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
@dist_init
def test_self_py_udf_remote_sparse(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_rpc_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg_sparse(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_remote_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_remote_arg_sparse(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
def test_world_size_one_sparse(self):
self._world_size_one(
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_multi_rpc_sparse(self):
self._multi_rpc(True)
def test_wait_all_workers_sparse(self):
self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor())
def test_wait_all_workers_twice_sparse(self):
self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor())
@dist_init
def test_py_sparse_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [build_sparse_tensor(), build_sparse_tensor()]
ret = rpc.rpc_sync(
worker_name(dst_rank), my_container_sum, args=(a,)
)
self.assertEqual(ret, my_container_sum(a))
@dist_init
def test_nested_rpc_sparse(self):
self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2)
@dist_init
def test_stress_heavy_rpc_sparse(self):
self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),))
@dist_init
def test_builtin_remote_ret_sparse(self):
self._builtin_remote_ret(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
@dist_init
def test_builtin_remote_self_sparse(self):
self._builtin_remote_self(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
@dist_init
def test_multi_builtin_remote_ret_sparse(self):
self._test_multi_remote_call(
torch.add, True,
args_fn=RpcTest._multi_args_fn
)
@dist_init
def test_multi_py_udf_remote_sparse(self):
self._test_multi_remote_call(
my_function,
True,
kwargs_fn=RpcTest._multi_kwargs_fn
)
@dist_init
def test_py_rref_args_sparse(self):
self._py_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 4
)
@dist_init
def test_py_rref_args_user_share_sparse(self):
self._py_rref_args_user_share(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
@dist_init
def test_py_rpc_rref_args_sparse(self):
self._py_rpc_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
@dist_init
def test_nested_remote_sparse(self):
self._nested_remote(
nested_remote_sparse,
build_sparse_tensor() + build_sparse_tensor()
)
@dist_init
def test_nested_rref_sparse(self):
self._nested_rref(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
@dist_init
def test_nested_rref_stress_sparse(self):
self._nested_rref_stress(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
@dist_init
def test_my_parameter_server_sparse(self):
self._my_parameter_server(True)
# Test init_rpc without world_size argument
@dist_init(setup_rpc=False)
def test_dynamic_rpc_init_rpc(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Dynamic RPC new ranks communicate with existing ranks
@dist_init(setup_rpc=False)
def test_dynamic_rpc_new_rank_can_communicate_with_existing_rank(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank == 0:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
# The barrier ensures rank 0 has finished initializing RPC before other ranks proceed
dist.barrier()
if self.rank != 0:
# Newly joined ranks will be able to communicate with rank 0, since that was created first
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
result = rpc.rpc_sync(worker_name(0), torch.add, args=(torch.tensor(1), torch.tensor(1)))
self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result)
# Barrier to ensure that all rpc_sync calls are finished
dist.barrier()
rpc.shutdown()
# Dynamic RPC existing ranks can communicate with new ranks
@dist_init(setup_rpc=False)
def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank == 0:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
# The barrier ensures rank 0 has finished initializing RPC before other ranks proceed
dist.barrier()
# Rest of ranks join after barrier
if self.rank != 0:
# Newly joined ranks will be able to communicate with rank 0, since that was created first
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
dist.barrier()
if self.rank == 0:
for i in range(1, self.world_size):
result = rpc.rpc_sync(worker_name(i), torch.add, args=(torch.tensor(1), torch.tensor(1)))
self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result)
# Barrier to ensure that all rpc_sync calls are finished
dist.barrier()
rpc.shutdown()
# Dynamic RPC existing ranks can communicate with new ranks using CUDA rpc
@skip_if_lt_x_gpu(2)
@dist_init(setup_rpc=False)
def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank_cuda(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank == 0:
options = self.rpc_backend_options
for i in range(1, self.world_size):
dst = worker_name(i)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 1})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=options,
)
# The barrier ensures rank 0 has finished initializing RPC before other ranks proceed
dist.barrier()
# Rest of ranks join after barrier
if self.rank != 0:
# Newly joined ranks will be able to communicate with rank 0, since that was created first
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
dist.barrier()
if self.rank == 0:
for i in range(1, self.world_size):
x = torch.ones(2)
result_on_device_0 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(0), 1))
result_on_device_1 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(1), 1))
self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_0)
self.assertEqual(torch.device('cuda:0'), result_on_device_0.device)
self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_1)
self.assertEqual(torch.device('cuda:1'), result_on_device_1.device)
# Barrier to ensure that all rpc_sync calls are finished
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_dynamic_rpc_init_rpc_without_rank(self):
# default initialization uses file init
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rpc_backend_options=self.rpc_backend_options,
)
# env init
with self.assertRaisesRegex(ValueError, "environment variable RANK expected"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="env://")
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rpc_backend_options=rpc_backend_options,
)
# tcp init
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="tcp://127.0.0.1:23456")
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rpc_backend_options=rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_dynamic_and_static_init_rpc_together(self):
# Initialize a static rpc group with size = self.world_size - 1
dist.init_process_group(
backend='gloo',
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size)
world_size_minus_one = self.world_size - 1
if self.rank < world_size_minus_one:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=world_size_minus_one,
rpc_backend_options=self.rpc_backend_options,
)
dist.barrier()
# Attempt to add an additional dynamic group member
if self.rank == world_size_minus_one:
# Expect an error to be raised
with self.assertRaisesRegex(
RuntimeError,
"RPC group mixes statically and dynamically initialized members "
"which is not supported."
):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon):
def _test_device_maps(self, options, errMsg):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}},
_transports=tp_transports()
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
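# Without a device map configured, any attempt to send CUDA tensors (or to return
# them, in the *_response variants) should fail with a clear error while leaving
# the agent usable for subsequent CPU-only RPCs.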
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
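# Perform the add on a side stream after a long busy-wait so that any missing
# CUDA stream synchronization on the RPC path is likely to surface as a wrong result.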
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)], # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
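# Three-role pipeline: worker0 produces inputs, worker1 hosts the model and runs
# forward, worker2 relays the output RRef back; each rank only configures the
# device maps required for its own role.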
def _test_rref_forward_synchronization(self, local_device, remote_device):
options = self.rpc_backend_options
input_src = worker_name(0)
model_dst = worker_name(1)
out_relay = worker_name(2)
if self.rank == 0:
# for 1) model construction 2) forward execution
options.set_device_map(model_dst, {local_device: remote_device})
# Forward output will be first copied to the relay node before
# returning to the worker. This is intentional, to test RRef
# forward CUDA stream synchronizations.
options.set_device_map(out_relay, {local_device: local_device})
elif self.rank == 1:
# worker1 hosts the model and runs forward. The forward function
# calls RRef.to_here(), hence it needs to configure the device map
options.set_device_map(input_src, {remote_device: local_device})
elif self.rank == 2:
# worker2 will get the out RRef and call to_here(), and hence needs
# to configure the device map.
options.set_device_map(model_dst, {local_device: remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
rref_out = rref.remote().forward(rref_input, True)
out = rpc.remote(
out_relay,
TensorPipeAgentCudaRpcTest._rref_relay,
args=(rref_out,)
).to_here()
expected = rref.rpc_sync().forward(rref_input, True)
self.assertEqual(out, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
if self.rank == 0:
options = self.rpc_backend_options
options.set_device_map("w0", {local_device: remote_device})
rpc.init_rpc(
"w0",
rank=0,
world_size=1,
rpc_backend_options=options
)
model = rpc.remote(
"w0", torch.nn.Linear, (2048, 20000)
).remote().to(remote_device)
for _ in range(30):
data = torch.rand(2048, 2048).to(local_device)
output = model.rpc_sync().forward(data)
# to_here() internally calls localValue as the caller is
# the owner of the RRef.
v0 = rpc.RRef(output).remote().sum().to_here().item()
v1 = output.sum().item()
self.assertEqual(v0, v1)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
x = torch.ones(1000, 200).cuda(0) * i
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
# serialization of the return value will create a new tensor from the
# view, which is done outside of the user function.
return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
futs = []
for i in range(5):
futs.append(rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._return_tensor_view,
args=(i,)
))
for i in range(5):
self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected source devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
options.set_devices([1])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected target devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
device_maps={dst: {0 : 1}},
devices=[0]
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
fut = Future(devices=[0])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
fut = Future(devices=["cuda:0"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
fut = Future(devices=[torch.device("cuda", 0)])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
with self.assertRaisesRegex(
ValueError, "Expected devices to have indices, got cpu"
):
fut = Future(devices=["cpu"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=False
)
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:0", "cuda:1"])
def cb(fut):
t0 = fut.value()
tensor1.copy_(t0, non_blocking=True)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:1"])
# As a plus, we test that futures still invoke callbacks even in case of
# error, and that the child futures are successful if those callbacks
# don't access the parent future.
def cb(fut):
with torch.cuda.device("cuda:1"):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor1.fill_(1)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with self.assertRaisesRegex(
ValueError,
r"The result contained tensors residing on device\(s\) cuda:0 "
r"which are not among the expected device\(s\) cuda:1",
):
parent_future.wait()
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t = torch.zeros((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 1).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
nested_dst = worker_name((self.rank + 2) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
a = torch.ones((100,), device="cuda:0")
b = torch.ones((100,), device="cuda:0")
c = torch.ones((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 3).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
future.set_result(tensor)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
tensor.fill_(1)
future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
tensor_list = [torch.zeros((100,), device="cuda:0")]
future = Future(devices=["cuda:0"])
future.set_result(tensor_list)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
# We set things up so that the original tensor contained in the list
# gets deleted once we replace it with the other one. This will
# invalidate any cached information held by the future.
tensor_list[0] = torch.ones((100,), device="cuda:0")
future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
rref.rpc_sync().increase(1)
ret = rref.rpc_sync().sum()
self.assertEqual(ret, 42)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=True
)
| pytorch-master | torch/testing/_internal/distributed/rpc/rpc_test.py |
import threading
import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
from torch import optim
from torch.distributed.optim import DistributedOptimizer
from torch.testing._internal.dist_utils import dist_init
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
class MyModule:
lock = threading.Lock()
def __init__(self, requires_grad=True):
# cannot directly use torch.manual_seed(0) as all threads share the same
# default generator. The race from multiple RPC threads could mess up
# the draw order from the default RNG instance, leading to
# non-deterministic behavior. Hence, create a dedicated RNG here.
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=g_cpu)
def forward(self, t1):
return torch.mm(self.w, t1)
def get_w(self):
return self.w
class FailingOptimizer(optim.Optimizer):
def __init__(self, params):
super().__init__(params, {})
def step(self, closure=None):
raise ValueError("Error running optimizer.")
class OptimizerFailingOnConstructor(optim.Optimizer):
def __init__(self, params):
super().__init__(params, {})
raise ValueError("Error creating optimizer.")
def step(self, closure=None):
raise NotImplementedError
def _call_method(method, obj_rref, *args, **kwargs):
return method(obj_rref.local_value(), *args, **kwargs)
def remote_method(method, obj_rref, *args, **kwargs):
"""
Call rpc.remote on a method in a remote object.
Args:
method: the method (for example, Class.method)
obj_rref (RRef): remote reference to the object
args: positional arguments to pass to the method
kwargs: keyword arguments to pass to the method
    Returns an RRef to the result of the remote method call.
"""
return rpc.remote(
obj_rref.owner(),
_call_method,
args=[method, obj_rref] + list(args),
kwargs=kwargs,
)
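# Usage sketch (hypothetical worker name; assumes rpc.init_rpc() has already
# been called on both ends), not executed here:
#
#   module_rref = rpc.remote("worker1", MyModule)
#   result_rref = remote_method(MyModule.forward, module_rref, torch.ones(3, 3))
#   result = result_rref.to_here()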
def rpc_async_method(method, obj_rref, *args, **kwargs):
"""
Call rpc.rpc_async on a method in a remote object.
Args:
method: the method (for example, Class.method)
obj_rref (RRef): remote reference to the object
args: positional arguments to pass to the method
kwargs: keyword arguments to pass to the method
Returns a Future to the method call result.
"""
return rpc.rpc_async(
obj_rref.owner(),
_call_method,
args=[method, obj_rref] + list(args),
kwargs=kwargs,
)
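# Usage sketch (hypothetical worker name), mirroring remote_method() above but
# yielding a Future instead of an RRef:
#
#   module_rref = rpc.remote("worker1", MyModule)
#   fut = rpc_async_method(MyModule.forward, module_rref, torch.ones(3, 3))
#   result = fut.wait()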
class DistOptimizerTest(RpcAgentTestFixture):
@dist_init()
def test_dist_optim_exception(self):
# distributed version
owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule)
remote_param1 = remote_method(MyModule.get_w, remote_module1)
remote_param2 = remote_method(MyModule.get_w, remote_module2)
dist_optim = DistributedOptimizer(
FailingOptimizer, [remote_param1, remote_param2]
)
with dist_autograd.context() as context_id:
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
loss = torch.add(output2.wait(), t1).sum()
dist_autograd.backward(context_id, [loss])
with self.assertRaisesRegex(Exception, "Error running optimizer"):
dist_optim.step(context_id)
@dist_init()
def test_dist_optim_exception_on_constructor(self):
# distributed version
owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule)
remote_param1 = remote_method(MyModule.get_w, remote_module1)
remote_param2 = remote_method(MyModule.get_w, remote_module2)
with self.assertRaisesRegex(Exception, "Error creating optimizer."):
dist_optim = DistributedOptimizer(
OptimizerFailingOnConstructor, [remote_param1, remote_param2]
)
def _test_dist_optim_base(self, optim_cls, *args, **kwargs):
# local version
module1 = MyModule()
module2 = MyModule()
params = [module1.get_w(), module2.get_w()]
local_optim = optim_cls(params, *args, **kwargs)
old_w1 = module1.w.clone().detach()
old_w2 = module2.w.clone().detach()
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = module1.forward(t2)
output2 = module2.forward(output1)
loss = torch.add(output2, t1).sum()
loss.backward()
local_optim.step()
# distributed version
owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule)
remote_param1 = remote_method(MyModule.get_w, remote_module1)
remote_param2 = remote_method(MyModule.get_w, remote_module2)
old_w1_remote = remote_param1.to_here()
# sanity check: local and remote initial weights should match
self.assertEqual(old_w1, remote_param1.to_here())
self.assertEqual(old_w2, remote_param2.to_here())
dist_optim = DistributedOptimizer(
optim_cls, [remote_param1, remote_param2], *args, **kwargs
)
with dist_autograd.context() as context_id:
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
loss = torch.add(output2.wait(), t1)
dist_autograd.backward(context_id, [loss.sum()])
dist_optim.step(context_id)
new_w1 = rpc_async_method(MyModule.get_w, remote_module1).wait()
new_w2 = rpc_async_method(MyModule.get_w, remote_module2).wait()
# ensure optimizer changed weights
self.assertNotEqual(old_w1, new_w1)
self.assertNotEqual(old_w2, new_w2)
# ensure local equals remote
self.assertEqual(new_w1, module1.get_w())
self.assertEqual(new_w2, module2.get_w())
@dist_init()
def test_dist_optim(self):
self._test_dist_optim_base(optim.Adagrad, lr=0.05)
self._test_dist_optim_base(optim.Adam, lr=1e-2, amsgrad=True)
self._test_dist_optim_base(optim.AdamW, lr=0.05, amsgrad=True)
self._test_dist_optim_base(optim.SGD, lr=0.05)
self._test_dist_optim_base(optim.SGD, lr=1e-3, momentum=1, weight_decay=1, nesterov=True)
self._test_dist_optim_base(optim.Adadelta, rho=0.95)
self._test_dist_optim_base(optim.RMSprop, lr=0.05)
self._test_dist_optim_base(optim.Adamax, lr=0.05)
self._test_dist_optim_base(optim.Rprop, lr=0.05)
def _test_dist_optim_none_grads(self, optim_cls, *args, **kwargs):
# local version
module1 = MyModule()
module2 = MyModule(requires_grad=False)
params = [module1.get_w(), module2.get_w()]
local_optim = optim_cls(params, *args, **kwargs)
old_w1 = module1.w.clone().detach()
old_w2 = module2.w.clone().detach()
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = module1.forward(t2)
output2 = module2.forward(output1)
loss = torch.add(output2, t1).sum()
loss.backward()
local_optim.step()
# distributed version
owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule, args=(False,))
remote_param1 = remote_module1.remote().get_w()
remote_param2 = remote_module2.remote().get_w()
# sanity check: local and remote initial weights should match
self.assertEqual(old_w1, remote_param1.to_here())
self.assertEqual(old_w2, remote_param2.to_here())
dist_optim = DistributedOptimizer(
optim_cls, [remote_param1, remote_param2], *args, **kwargs
)
with dist_autograd.context() as context_id:
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = remote_module1.rpc_async().forward(t2)
output2 = remote_module2.rpc_async().forward(output1.wait())
loss = torch.add(output2.wait(), t1)
dist_autograd.backward(context_id, [loss.sum()])
dist_optim.step(context_id)
new_w1 = remote_module1.rpc_async().get_w().wait()
new_w2 = remote_module2.rpc_async().get_w().wait()
# ensure optimizer changed weights for w1
self.assertNotEqual(old_w1, new_w1)
# ensure optimizer not changed weights for w2
self.assertEqual(old_w2, new_w2)
# ensure local equals remote
self.assertEqual(new_w1, module1.get_w())
self.assertEqual(new_w2, module2.get_w())
@dist_init()
def test_dist_optim_none_grads(self):
self._test_dist_optim_none_grads(optim.SGD, lr=0.05)
self._test_dist_optim_none_grads(optim.RMSprop, lr=0.05)
self._test_dist_optim_none_grads(optim.Rprop, lr=0.05)
self._test_dist_optim_none_grads(optim.Adadelta, rho=0.95)
| pytorch-master | torch/testing/_internal/distributed/rpc/dist_optimizer_test.py |
| pytorch-master | torch/testing/_internal/distributed/rpc/jit/__init__.py |
from typing import Dict, Tuple
import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
from torch import Tensor
from torch.distributed.rpc import rpc_async
from torch.testing import FileCheck
from torch.testing._internal.dist_utils import dist_init, worker_name
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
@torch.jit.script
def local_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def remote_add(t1, t2, dst: str): # noqa: E999
return rpc_async(dst, local_add, (t1, t2)).wait()
@torch.jit.script
def fork_add(t1, t2, dst: str):
fut = torch.jit._fork(remote_add, t1, t2, dst)
return torch.jit._wait(fut)
class JitDistAutogradTest(RpcAgentTestFixture):
@dist_init
def test_get_gradients(self):
dst_rank = self.rank
@torch.jit.script
        def dist_get_gradients(context_id: int) -> Dict[Tensor, Tensor]:
return dist_autograd.get_gradients(context_id)
FileCheck().check("get_gradients").run(str(dist_get_gradients.graph))
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_dist_backward(self):
if self.rank != 0:
return
@torch.jit.script
def dist_backward_script(context_id: int, loss: torch.Tensor):
dist_autograd.backward(context_id, [loss])
FileCheck().check("dist_backward").run(str(dist_backward_script.graph))
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
loss = rpc.rpc_sync(dst_worker_name, torch.add, args=(t1, t2)).sum()
dist_backward_script(context_id, loss)
@dist_init
def test_jit_fork_within_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
res = fork_add(t1, t2, dst_worker_name)
loss = res.sum()
dist_autograd.backward(context_id, [loss])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
@dist_init
def test_restore_context_after_swtich_to_jit_thread(self):
if self.rank != 0:
return
@torch.jit.script
def forward_script(
context_id: int, dst_worker_name: str, t1: Tensor, t2: Tensor
) -> Tuple[Tensor, Tensor]:
res1_fut = rpc.rpc_async(dst_worker_name, local_add, (t1, t1))
res1 = res1_fut.wait() # After this, the script runs in a new JIT thread.
loss1 = res1.sum()
            # SendRpcBackward is not attached, since the DistAutogradContext is lost here.
res2_fut = rpc.rpc_async(dst_worker_name, local_add, (t2, t2))
res2 = res2_fut.wait()
loss2 = res2.sum()
return loss1, loss2
with dist_autograd.context() as context_id:
t1 = torch.ones((2, 3), requires_grad=True)
t2 = torch.ones((2, 3), requires_grad=True)
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
loss0, loss1 = forward_script(context_id, dst_worker_name, t1, t2)
dist_autograd.backward(context_id, [loss0, loss1])
grad0, grad1 = dist_autograd.get_gradients(context_id)
self.assertEqual(grad0, grad1)
| pytorch-master | torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py |
import time
import io
from typing import Dict, List, Tuple, Any
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
from torch import Tensor
from torch.autograd.profiler import record_function
from torch.distributed.rpc import RRef
from torch.distributed.rpc.internal import RPCExecMode, _build_rpc_profiling_key
from torch.futures import Future
from torch.testing._internal.common_utils import TemporaryFileName
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.autograd.profiler_legacy import profile as _profile
def rref_isinstance(rref, cls_to_check):
return isinstance(rref.local_value(), cls_to_check)
def sleep(t):
time.sleep(t)
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
@torch.jit.script
def rref_local_value(rref: RRef[Tensor]) -> Tensor:
return rref.local_value()
@torch.jit.script
def list_create() -> List[int]:
global_list = [1, 2, 3]
return global_list
@torch.jit.script
def rref_list_mutate(rref: RRef[List[int]]) -> None:
rref.local_value().append(4)
rref.to_here().append(5)
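    # to_here() also accepts an optional timeout in seconds; passing 5.0 here
    # exercises that overload while still mutating the owner's list.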
rref.to_here(5.0).append(6)
def return_value(value: int) -> int:
return value
class RRefAPITest:
@dist_init
def test_rref_is_owner(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref_var = rpc_return_rref(dst_worker_name)
@torch.jit.script
def rref_tensor_is_owner(rref_var: RRef[Tensor]) -> bool:
return rref_var.is_owner()
res = rref_tensor_is_owner(rref_var)
self.assertEqual(res, False)
@dist_init
def test_rref_local_value(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref = rpc_return_rref(dst_worker_name)
with self.assertRaisesRegex(
RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef"
):
rref_local_value(rref)
        ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,))
self.assertEqual(ret, torch.add(torch.ones(2, 2), 1))
@dist_init
def test_local_rref_local_value(self):
if self.rank != 0:
return
dst_worker_name = worker_name(self.rank)
rref = rpc.remote(dst_worker_name, return_value, (5,), {})
ret = rref_local_value(rref)
self.assertEqual(ret, 5)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank), torch.add, args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_list_mutate(self):
dst = worker_name((self.rank + 1) % self.world_size)
list_rref = rpc.remote(dst, list_create)
rpc.rpc_sync(dst, rref_list_mutate, args=(list_rref,))
self.assertEqual(list_rref.to_here(), [1, 2, 3, 4, 5, 6])
@torch.jit.script
def no_arg():
return 0
@torch.jit.script
def one_arg(value):
return value + 1
@torch.jit.script
def script_add_ones(x):
return torch.add(x, torch.ones(1))
@torch.jit.script
def script_add_ones_with_record_function(x, block: str):
with record_function(block):
return torch.add(x, torch.ones(1))
@torch.jit.script
def record_function_on_caller_rpc_async(dst_worker_name: str, block: str) -> Tensor:
t: Tensor = torch.ones(1)
with record_function(block) as rf:
fut1 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
# Extra operator call to avoid de-duplication of the next async call
# see https://github.com/pytorch/pytorch/pull/62710#discussion_r694680279
zero = torch.zeros_like(t)
fut2 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
res = fut1.wait() + fut2.wait() + zero
return res
@torch.jit.script
def script_fork_wait_udf(tensor):
fut = torch.jit._fork(script_add_ones, tensor)
x = torch.jit._wait(fut)
return x
@torch.jit.script
def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
return rref_var.to_here()
@torch.jit.script
def return_rref(rref_var: RRef[Tensor]) -> RRef[Tensor]:
return rref_var
@torch.jit.script
def script_raise_func(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
@torch.jit.script
def script_fork_wait_throw(invalue):
fut = torch.jit._fork(script_raise_func, invalue)
value = torch.jit._wait(fut)
return value
@torch.jit.script
def call_rpc_with_profiling(handle: Tensor, dst_worker_name: str) -> Tensor:
# Call rpc_async from within ScriptFunction and ensure that we can attach
# profiling callbacks. Note that handle here is a Tensor representation of
# RecordFunction.
fut = rpc.rpc_async(dst_worker_name, one_arg, (torch.tensor(1),))
torch.ops.profiler._call_end_callbacks_on_jit_fut(handle, fut)
ret = fut.wait()
return ret
@torch.jit.script
def call_rpc_torchscript_with_record_function(dst_worker_name: str, block: str) -> Tensor:
fut = rpc.rpc_async(dst_worker_name, script_add_ones_with_record_function, (torch.tensor(1), block))
return fut.wait()
@torch.jit.script
def call_fork_with_profiling(handle: Tensor) -> Tensor:
# Call fork from within ScriptFunction and ensure that we can attach profiling
# callbacks to the resulting future. Note that handle here is a Tensor
# representation of RecordFunction.
fut = torch.jit._fork(one_arg, torch.tensor(1))
torch.ops.profiler._call_end_callbacks_on_jit_fut(handle, fut)
ret = fut.wait()
return ret
class MyScriptModuleWithRRefs(torch.jit.ScriptModule):
def __init__(self, dst_worker):
super().__init__()
self.rrefs = []
for _ in range(4):
self.rrefs.append(rpc_return_rref(dst_worker))
@torch.jit.script_method
def forward(self) -> Tensor:
res_tensor = torch.ones(2, 2)
for rref in self.rrefs:
res_tensor += rref.to_here()
return res_tensor
@torch.jit.ignore
def rref_python_annotation(rref_var: RRef[Tensor]) -> RRef[Tensor]:
return rref_var
@torch.jit.script
def rref_script_annotation(rref_var: RRef[Tensor]) -> Tensor:
return rref_python_annotation(rref_var).to_here()
class RRefTypingTest:
@dist_init
def test_rref_as_arg_and_return(self):
n = self.rank + 1
dst_rank = n % self.world_size
local_ret = one_arg(torch.ones(2, 2))
# create rref on current rank
rref = rpc.remote(worker_name(self.rank), one_arg, args=(torch.ones(2, 2),))
# pass rref to another user in rpc call
ret = rpc.rpc_sync(worker_name(dst_rank), rref_to_here, args=(rref,))
self.assertEqual(ret, local_ret)
# return rref in rpc call
rref1 = rpc.rpc_sync(worker_name(dst_rank), return_rref, args=(rref,))
self.assertEqual(rref1.to_here(), local_ret)
# pass rref to another user in remote call
rref2 = rpc.remote(worker_name(dst_rank), rref_to_here, args=(rref,))
self.assertEqual(rref2.to_here(), local_ret)
# return rref in remote call
rref3 = rpc.remote(worker_name(dst_rank), return_rref, args=(rref,))
self.assertEqual(rref3.to_here().to_here(), local_ret)
@dist_init
def test_my_script_module_with_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
module_with_rrefs = MyScriptModuleWithRRefs(worker_name(dst_rank))
res = module_with_rrefs()
self.assertEqual(res, torch.ones(2, 2) * 9)
@dist_init
def test_rref_python_annotation(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_var = rpc_return_rref(worker_name(dst_rank))
res = rref_script_annotation(rref_var)
self.assertEqual(res, torch.ones(2, 2) + 1)
class FutureTypingTest:
@dist_init
def test_future_passed_between_python_and_jit(self):
dst_rank = (self.rank + 1) % self.world_size
inputs = (torch.tensor([1, 1]), torch.tensor([2, 2]))
ret_fut = rpc.rpc_async(worker_name(dst_rank), two_args_two_kwargs, args=inputs)
expected_res = torch.tensor([10, 10])
@torch.jit.script
def future_wait_in_script(fut: Future[Tensor]) -> Tensor:
return fut.wait()
self.assertEqual(future_wait_in_script(ret_fut), expected_res)
@torch.jit.script
def future_return_to_python(
dst_rank: int, inputs: Tuple[Tensor, Tensor]
) -> Future[Tensor]:
return rpc.rpc_async(
"worker{}".format(dst_rank), two_args_two_kwargs, inputs
)
fut_res = future_return_to_python(dst_rank, inputs)
self.assertEqual(fut_res.wait(), expected_res)
@dist_init
def test_future_python_annotation(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
input_0 = torch.ones(2, 2)
input_1 = 1
expected_res = torch.add(input_0, input_1)
@torch.jit.ignore
def python_return_future() -> Future[Tensor]:
fut = rpc.rpc_async(dst_worker_name, torch.add, (input_0, input_1), {})
return fut
@torch.jit.script
def script_use_future() -> Tensor:
fut = python_return_future()
return fut.wait()
res = script_use_future()
self.assertEqual(res, expected_res)
@torch.jit.script
class MyScriptClass:
def __init__(self, a: int):
self.a = a
def get_value(self) -> int:
return self.a
@torch.jit.interface
class MyModuleInterface(torch.nn.Module):
def forward(self) -> Tensor:
# pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
pass
class MyScriptModule(torch.jit.ScriptModule):
def __init__(self, rank):
super().__init__()
self.a = torch.ones(rank)
@torch.jit.script_method
def forward(self) -> Tensor:
return self.a
@torch.jit.script_method
def custom_func(self) -> Tensor:
return self.a
def owner_create_rref_my_script_class(a):
return rpc.RRef(MyScriptClass(a))
def owner_create_rref_my_script_module(a):
return rpc.RRef(MyScriptModule(a), type_hint=MyModuleInterface)
@torch.jit.script
def script_rref_get_value_my_script_class(rref: RRef[MyScriptClass]) -> int:
return rref.to_here().get_value()
@torch.jit.script
def script_rref_run_forward_my_script_module(rref: RRef[MyModuleInterface]) -> Tensor:
return rref.to_here().forward()
class LocalRRefTest:
@dist_init
def test_create_local_script_class_rref_in_py(self):
if self.rank != 0:
return
# Create a local RRef<MyScriptClass>.
rref_script_class = rpc.RRef(MyScriptClass(self.rank))
ret = rref_script_class.to_here().get_value()
self.assertEqual(ret, self.rank)
@dist_init
def test_create_local_script_module_rref_in_py(self):
if self.rank != 0:
return
# Create a local RRef<MyModuleInterface>.
rref_script_module = rpc.RRef(MyScriptModule(self.rank), MyModuleInterface)
ret = rref_script_module.to_here().forward()
self.assertEqual(ret, torch.ones(self.rank))
# Create a local RRef<MyModuleInterface> without type hint.
with self.assertRaisesRegex(
RuntimeError,
(
"The RRef being created contains a ScriptModule, "
"must provide its ModuleInterface type hint."
),
):
rref_script_module = rpc.RRef(MyScriptModule(self.rank))
@dist_init
def test_return_local_script_class_rref_in_py_and_use_in_script(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        # Create a local RRef<MyScriptClass> remotely in Python.
rref = rpc.rpc_sync(
dst_worker_name, owner_create_rref_my_script_class, args=(self.rank,)
)
def use_rref_on_owner(rref: RRef[MyScriptClass]) -> int:
args = (rref,)
kwargs: Dict[str, Any] = {}
fut = rpc.rpc_async(
rref.owner(), script_rref_get_value_my_script_class, args, kwargs
)
ret = fut.wait()
return ret
        # Use RRef<MyScriptClass> in local Python RPC and remote Script run.
ret = use_rref_on_owner(rref)
self.assertEqual(ret, self.rank)
# Use RRef<MyScriptClass> in local Script RPC and remote Script run.
use_rref_on_owner_script = torch.jit.script(use_rref_on_owner)
ret = use_rref_on_owner_script(rref)
self.assertEqual(ret, self.rank)
@dist_init
def test_return_local_script_module_rref_in_py_and_use_in_script(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
# Create a local RRef<MyModuleInterface> remotely in Python.
rref = rpc.rpc_sync(
dst_worker_name, owner_create_rref_my_script_module, args=(self.rank,)
)
def use_rref_on_owner(rref: RRef[MyModuleInterface]) -> Tensor:
args = (rref,)
kwargs: Dict[str, Any] = {}
fut = rpc.rpc_async(
rref.owner_name(),
script_rref_run_forward_my_script_module,
args,
kwargs,
)
ret = fut.wait()
return ret
        # Use RRef<MyModuleInterface> in local Python RPC and remote Script run.
ret = use_rref_on_owner(rref)
self.assertEqual(ret, torch.ones(self.rank))
        # Use RRef<MyModuleInterface> in local Script RPC and remote Script run.
use_rref_on_owner_script = torch.jit.script(use_rref_on_owner)
ret = use_rref_on_owner_script(rref)
self.assertEqual(ret, torch.ones(self.rank))
def python_function():
return 0
@torch.jit.script
def two_args_two_kwargs(
first_arg,
second_arg,
first_kwarg=torch.tensor([3, 3]),
second_kwarg=torch.tensor([4, 4]),
):
return first_arg + second_arg + first_kwarg + second_kwarg
@torch.jit.script
def assorted_types_args_kwargs(
tensor_arg: Tensor, # noqa: E999
str_arg: str,
int_arg: int,
tensor_kwarg: Tensor = torch.tensor([2, 2]),
str_kwarg: str = "str_kwarg",
int_kwarg: int = 2,
):
return tensor_arg + tensor_kwarg, str_arg + str_kwarg, int_arg + int_kwarg
@torch.jit.script
def raise_script():
raise RuntimeError("Expected error")
@torch.jit.script
def script_rpc_async_call(
dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
@torch.jit.script
def script_rpc_sync_call(
dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
res = rpc.rpc_sync(dst_worker_name, two_args_two_kwargs, args, kwargs)
return res
@torch.jit.script
def script_rpc_remote_call(
dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
rref_res = rpc.remote(dst_worker_name, two_args_two_kwargs, args, kwargs)
return rref_res.to_here()
class JitRpcOpTest:
# Call functions remotely from Script.
@dist_init
def test_all_kwargs_are_populated_by_defaults(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {}
for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
ret = script_op(
dst_worker_name, args, kwargs
)
self.assertEqual(ret, torch.tensor([10, 10]))
@dist_init
def test_some_kwargs_are_populated_by_defaults(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {"first_kwarg": torch.tensor([2, 2])}
for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
ret = script_op(
dst_worker_name, args, kwargs
)
self.assertEqual(ret, torch.tensor([9, 9]))
@dist_init
def test_no_kwargs_are_populated_by_defaults(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
ret = script_op(
dst_worker_name, args, kwargs
)
self.assertEqual(ret, torch.tensor([8, 8]))
@dist_init
def test_args_and_kwargs_contain_different_types(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def script_rpc_async_call_with_assorted_types(
dst_worker_name: str,
):
args = (torch.tensor([1, 1]), "str_arg", 1)
# Must annotate the value type as `Any`, because JIT type inference
# does not support multiple types when defining a Dict.
# The error JIT gives is,
# "Dict values must contain only a single type, "
# "expected: Tensor but found str instead."
kwargs: Dict[str, Any] = {
"tensor_kwarg": torch.tensor([3, 3]),
"str_kwarg": "_str_kwarg",
"int_kwarg": 3,
}
fut = rpc.rpc_async(
dst_worker_name, assorted_types_args_kwargs, args, kwargs
)
ret = fut.wait()
return ret
ret = script_rpc_async_call_with_assorted_types(
dst_worker_name
)
self.assertEqual(ret, (torch.tensor([4, 4]), "str_arg_str_kwarg", 4))
@dist_init
def test_kwargs_not_passed(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def script_rpc_async_call_without_kwargs_passed(
dst_worker_name: str,
):
args = ()
fut = rpc.rpc_async(dst_worker_name, no_arg, args)
ret = fut.wait()
return ret
ret = script_rpc_async_call_without_kwargs_passed(
dst_worker_name
)
self.assertEqual(ret, 0)
@dist_init
def test_args_kwargs_are_neither_passed(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def script_rpc_async_call_without_args_kwargs_passed(
dst_worker_name: str,
):
fut = rpc.rpc_async(dst_worker_name, no_arg)
ret = fut.wait()
return ret
ret = script_rpc_async_call_without_args_kwargs_passed(
dst_worker_name
)
self.assertEqual(ret, 0)
@dist_init
def test_less_than_needed_args_are_specified(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        # Note that args matching happens during scripting.
with self.assertRaisesRegex(RuntimeError, "Argument second_arg not provided"):
@torch.jit.script
def script_rpc_async_call_with_less_args(
dst_worker_name: str, # noqa: E999
):
args = (torch.tensor([1, 1]),)
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
@dist_init
def test_more_than_needed_args_are_specified(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        # Note that args matching happens during scripting.
with self.assertRaisesRegex(
RuntimeError,
"Expected at most 4 arguments but found 5 positional arguments",
):
@torch.jit.script
def script_rpc_async_call_with_more_args(
dst_worker_name: str,
):
args = (
torch.tensor([1, 1]),
torch.tensor([2, 2]),
torch.tensor([3, 3]),
torch.tensor([4, 4]),
torch.tensor([5, 5]),
)
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
@dist_init
def test_unexepected_kwarg_is_specified(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        # Note that kwargs matching happens during execution.
@torch.jit.script
def script_rpc_async_call_with_unexpected_kwarg(
dst_worker_name: str, # noqa: E999
):
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {"third_kwarg": torch.tensor([1, 1])}
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
with self.assertRaisesRegex(
RuntimeError, "Unknown keyword argument 'third_kwarg'"
):
ret = script_rpc_async_call_with_unexpected_kwarg(
dst_worker_name
)
self.assertEqual(ret, 0)
@dist_init
def test_call_python_function_remotely_from_script_not_supported(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def rpc_async_call_remote_py_function_in_torchscript(dst_worker_name: str):
args = ()
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, python_function, args, kwargs)
ret = fut.wait()
return ret
with self.assertRaisesRegex(
RuntimeError, "attempted to get undefined function"
):
ret = rpc_async_call_remote_py_function_in_torchscript(dst_worker_name)
self.assertEqual(ret, 0)
@dist_init
def test_call_script_function_that_raises_remotely_from_script(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        # Note that TorchScript always translates (emits) a Python `raise` statement
        # as the generic exception message string "Exception", no matter what
        # exception type and exception message appear in the statement.
@torch.jit.script
def rpc_async_call_remote_raising_torchscript_in_torchscript(
dst_worker_name: str,
):
args = ()
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, raise_script, args, kwargs)
ret = fut.wait()
return ret
with self.assertRaisesRegex(RuntimeError, "Expected error"):
ret = rpc_async_call_remote_raising_torchscript_in_torchscript(
dst_worker_name
)
self.assertEqual(ret, 0)
@dist_init
def test_call_script_function_that_not_exists_remotely_from_script(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
@torch.jit.script
def nonexisting_script():
return 0
@torch.jit.script
def rpc_async_call_remote_nonexisting_torchscript_in_torchscript(
dst_worker_name: str,
):
args = ()
kwargs = {}
fut = rpc.rpc_async(dst_worker_name, nonexisting_script, args, kwargs)
ret = fut.wait()
return ret
with self.assertRaisesRegex(
RuntimeError, "attempted to get undefined function nonexisting_script"
):
ret = rpc_async_call_remote_nonexisting_torchscript_in_torchscript(
dst_worker_name
)
self.assertEqual(ret, 0)
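# my_script_module_init is marked @torch.jit.ignore below so that the
# ScriptModule is constructed in eager Python, while scripted callers only see
# the MyModuleInterface return type.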
@torch.jit.ignore
def my_script_module_init(rank: int) -> MyModuleInterface:
return MyScriptModule(rank)
@torch.jit.script
def construct_my_script_module(rank: int) -> MyModuleInterface:
return my_script_module_init(rank)
@torch.jit.script
def run_ref_script_module(
ref_script_module: RRef[MyModuleInterface], t: Tensor
) -> Tensor:
module = ref_script_module.to_here()
return module.forward() + t
@torch.jit.script
def script_check_rref_confirmed(rref: RRef[Tensor]) -> bool:
return rref.confirmed_by_owner()
@torch.jit.script
def save_rref(rref_var: RRef[Tensor], fname: str) -> None:
torch.save(rref_var, fname)
@torch.jit.script
def script_add(x: Tensor, y: Tensor) -> Tensor:
return x + y
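# @rpc.functions.async_execution marks the decorated function as returning a
# Future; the RPC framework sends the response only after that Future
# completes, instead of blocking an RPC thread on the nested call.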
@rpc.functions.async_execution
@torch.jit.script
def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]:
return rpc.rpc_async(to, script_add, (x, y))
@rpc.functions.async_execution
@torch.jit.script
def async_wrong_type() -> Tensor:
return torch.zeros(2)
def load_script_module_with_pickled_rref(pickled_script_module):
f = io.BytesIO(pickled_script_module)
m = torch.jit.load(f)
return m()
class JitRpcTest(
RRefAPITest,
RRefTypingTest,
LocalRRefTest,
JitRpcOpTest,
FutureTypingTest,
RpcAgentTestFixture,
):
@dist_init
def test_torchscript_function(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
local_ret = one_arg(torch.ones(2, 2))
ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
self.assertEqual(ret, local_ret)
rref = rpc.remote(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
self.assertEqual(rref.to_here(), local_ret)
# create rref to itself
local_rref = rpc.remote(
worker_name(self.rank), one_arg, args=(torch.ones(2, 2),)
)
self.assertEqual(local_rref.to_here(), local_ret)
@dist_init
def test_torchscript_function_exception(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(10, 20))
with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
rref = rpc.remote(dst_worker_name, one_arg, args=(10, 20))
@dist_init
def test_torchscript_functions_not_supported(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
my_local_script_module = MyScriptModule(self.rank)
        # It is not thread-safe to instantiate MyScriptModule in multiple threads.
        # Wait for the local MyScriptModule instantiation to finish; otherwise it
        # could be instantiated in parallel with the server thread below.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# rpc_sync still accepts script class and run it in
# the same code path as python call.
ret = rpc.rpc_sync(dst_worker_name, MyScriptClass, args=(self.rank,))
# rpc_sync does not accept script module method.
        # Python 3.5 and Python 3.6 throw different error messages; the only
        # common word that can be grepped is "pickle".
with self.assertRaisesRegex(TypeError, "pickle"):
ret = rpc.rpc_async(
dst_worker_name, my_local_script_module.forward, args=()
)
@dist_init
def test_remote_script_module(self):
        # TODO: needs more investigation.
        # There is an RRef leak when shutting down; the suspicion is that the RRef
        # passed as an arg crosses the pybind boundary and is not garbage
        # collected by Python when calling shutdown().
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
local_ret = torch.ones(self.rank) + torch.ones(self.rank)
n = self.rank + 1
dst_rank = n % self.world_size
remote_ref = rpc.remote(
worker_name(dst_rank), construct_my_script_module, args=(self.rank,)
)
# pass rref arg to owner
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_ref_script_module,
args=(remote_ref, torch.ones(self.rank)),
)
self.assertEqual(ret, local_ret)
# pass rref arg to self/user
with self.assertRaisesRegex(
RuntimeError,
"is an RRef to a ScriptModule. It can't be sent through RPC from owner,",
):
ret = rpc.rpc_sync(
worker_name(self.rank),
run_ref_script_module,
args=(remote_ref, torch.ones(self.rank)),
)
@dist_init
def test_create_script_module_on_remote(self):
dst_name = worker_name((self.rank + 1) % self.world_size)
# Construct on remote end with rpc_sync
created_script_module = rpc.rpc_sync(
dst_name, MyScriptModule, args=(self.rank,)
)
# Forward should output a ones tensor of self.rank.
self.assertTrue(isinstance(created_script_module, torch.jit.ScriptModule))
rank_ones_tensor = created_script_module()
self.assertEqual(torch.ones(self.rank), rank_ones_tensor)
# Construct ScriptModule with rpc.remote.
remote_script_module = rpc.remote(dst_name, MyScriptModule, args=(self.rank,))
# Verify it is an instance of ScriptModule on remote end.
remote_end_is_script = rpc.rpc_sync(
remote_script_module.owner(),
rref_isinstance,
args=(remote_script_module, torch.jit.ScriptModule),
)
self.assertTrue(remote_end_is_script)
# Run forward pass remotely.
remote_forward_output = remote_script_module.rpc_sync().forward()
self.assertEqual(remote_forward_output, torch.ones(self.rank))
# Run function defined on ScriptModule remotely.
remote_func_output = remote_script_module.rpc_sync().custom_func()
self.assertEqual(remote_func_output, torch.ones(self.rank))
# Ensure we can transfer ScriptModule RRef to this rank and run
# forward pass.
local_script_module = remote_script_module.to_here()
self.assertTrue(isinstance(local_script_module, torch.jit.ScriptModule))
rank_ones_tensor = local_script_module()
self.assertEqual(rank_ones_tensor, torch.ones(self.rank))
local_script_func_output = local_script_module.custom_func()
self.assertEqual(local_script_func_output, torch.ones(self.rank))
@dist_init
def test_load_script_module_with_pickled_rref(self):
dst_name = worker_name((self.rank + 1) % self.world_size)
m1 = MyScriptModuleWithRRefs(dst_name)
m2 = MyScriptModuleWithRRefs(dst_name)
f = io.BytesIO()
rpc._enable_jit_rref_pickle()
torch.jit.save(m1, f)
rpc._disable_jit_rref_pickle()
out1 = rpc.rpc_sync(
dst_name,
load_script_module_with_pickled_rref,
args=(f.getvalue(),)
)
out2 = m2()
self.assertEqual(out1, out2)
@dist_init
def test_rref_jit_pickle_not_supported(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_var = rpc_return_rref(worker_name(dst_rank))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(
RuntimeError, "RRef jit pickling is only allowed inside RPC calls"
):
save_rref(rref_var, fname)
@dist_init
def test_remote_script_throw(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
script_raise_func,
args=(torch.ones(2),),
)
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_remote_script_udf(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
)
self.assertEqual(rref.to_here(), torch.ones(2) * 2)
@dist_init
def test_async_script_udf(self):
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
)
self.assertEqual(future.wait(), torch.ones(2) * 2)
@dist_init
def test_callback_simple(self):
def callback(fut):
return fut.wait() + 1
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
).then(callback)
self.assertEqual(future.wait(), torch.ones(2) * 2 + 1)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size), one_arg, args=(torch.ones(n, n),)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_add_done_callback(self):
callback_called = None
def callback(fut):
nonlocal callback_called
callback_called = fut.wait() * 2
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
)
future.add_done_callback(callback)
future_then = future.then(lambda _: True)
self.assertEqual(future.wait(), torch.ones(2) * 2)
# We have no guarantee that the add_done_callback fn will execute before the test finishes.
        # Add a 'then' callback that runs afterwards to guarantee that we wait for the first callback.
future_then.wait()
self.assertEqual(callback_called, torch.ones(2) * 4)
@dist_init
def test_async_script_throw(self):
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_throw,
args=(torch.ones(2),),
)
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
future.wait()
@dist_init
def test_callback_with_exception(self):
def callback(fut):
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
fut.wait()
raise RuntimeError("Another expected error")
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_throw,
args=(torch.ones(2),),
).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
future.wait()
@dist_init
def test_call_rpc_with_profiling(self):
# Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
# future from within a script function that calls rpc_async
if self.rank == 0:
with _profile() as prof:
prof_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(one_arg),
"worker0",
"worker1",
)
with torch.autograd.profiler.record_function(prof_key) as rf:
ret = call_rpc_with_profiling(rf.handle, "worker1")
# TODO: Can't get a reliable time for this profiling event since
# it's hard to estimate the execution time on the remote end for non-UDFs.
# This can be resolved by https://github.com/pytorch/pytorch/issues/36272.
# After that, this test should be modified to validate the function time.
events = prof.function_events
function_event = get_function_event(events, prof_key)
self.assertTrue(torch._jit_internal._qualified_name(one_arg) in function_event.name)
@dist_init
def test_rpc_async_jit_profiled(self):
# Tests that rpc_async calls made from within a TorchScript function are
# profiled.
if self.rank == 0:
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {}
with _profile() as prof:
script_rpc_async_call(
dst_worker_name, args, kwargs
)
# Ensure rpc_async call is profiled
function_events = prof.function_events
qual_name = torch._jit_internal._qualified_name(two_args_two_kwargs)
rpc_async_jit_event = [
event
for event in function_events
if qual_name in event.name and event.node_id == self.rank
]
self.assertEqual(len(rpc_async_jit_event), 1)
rpc_async_jit_event = rpc_async_jit_event[0]
profiled_name = _build_rpc_profiling_key(
RPCExecMode.ASYNC_JIT,
qual_name,
worker_name(self.rank),
dst_worker_name,
)
self.assertEqual(profiled_name, rpc_async_jit_event.name)
remote_events = [event for event in function_events if event.is_remote]
# All remote events should have taken place on dst_rank
remote_event_node_ids = {
remote_event.node_id for remote_event in remote_events
}
self.assertEqual(remote_event_node_ids, {dst_rank})
# script_rpc_async_call invokes add operator
# so we should see this as a remote event.
remote_add = [
remote_event
for remote_event in remote_events
if "aten::add" in remote_event.name
][0]
remote_add_profiled_name = f"{profiled_name}#remote_op: aten::add"
self.assertEqual(remote_add.name, remote_add_profiled_name)
@dist_init
def test_record_function_on_caller_rpc_async(self):
if self.rank == 0:
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
block_scope = "foo"
with _profile() as prof:
# Runs 2 rpc_async calls within JIT under record_function.
record_function_on_caller_rpc_async(dst_worker_name, block_scope)
# Ensure record_function event is profiled.
function_events = prof.function_events
record_function_scope_event = [
event for event in function_events if event.name == block_scope
]
self.assertEqual(1, len(record_function_scope_event))
record_function_scope_event = record_function_scope_event[0]
# Ensure RPC future is profiled.
expected_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC_JIT,
torch._jit_internal._qualified_name(script_add_ones),
worker_name(self.rank),
dst_worker_name,
)
jit_rpc_events = [
event for event in function_events if event.name == expected_key
]
self.assertEqual(2, len(jit_rpc_events))
# Validate that the record_function scope time is greater than both
# of the individual RPC async call times. The reason it is not necessarily
# greater than the sum is because the two can execute in parallel.
for jit_rpc_event in jit_rpc_events:
self.assertTrue(
record_function_scope_event.cpu_time_total
> jit_rpc_event.cpu_time_total
)
@dist_init
def test_rpc_torchscript_record_function(self):
        # Tests that TorchScript functions can be profiled over RPC using
        # `with record_function(...)`.
REMOTE_OP_STR = "#remote_op: "
if self.rank == 0:
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
block_scope = "foo"
with _profile() as prof:
call_rpc_torchscript_with_record_function(dst_worker_name, block_scope)
# Need to call below to populate CPU children.
prof.key_averages()
function_events = prof.function_events
expected_key = (
_build_rpc_profiling_key(
RPCExecMode.ASYNC_JIT,
torch._jit_internal._qualified_name(
script_add_ones_with_record_function
),
worker_name(self.rank),
dst_worker_name,
)
+ REMOTE_OP_STR
+ block_scope
)
remote_record_function_event = [
evt for evt in function_events if evt.name == expected_key
][0]
self.assertTrue(block_scope in remote_record_function_event.name)
remote_children = remote_record_function_event.cpu_children
            self.assertTrue(any("aten::add" in child.name for child in remote_children))
def test_record_function_jit_end_callbacks_with_fork(self):
# Ensures that we can call rf._call_end_callbacks_on_future on a jit
# future in python eager mode with torch.jit.fork
sleep_interval = 1
with _profile() as prof:
with torch.autograd.profiler.record_function("foo") as rf:
fut = torch.jit._fork(sleep, sleep_interval)
rf._call_end_callbacks_on_future(fut)
fut.wait()
function_events = prof.function_events
sleep_event = get_function_event(function_events, "foo")
self.assertEqual(sleep_event.name, "foo")
# Validate that callbacks were fired at the right time by checking the
# profiling event cpu time
self.assertGreaterAlmostEqual(sleep_event.cpu_time * 1e-6, sleep_interval)
def test_call_fork_in_jit_with_profiling(self):
# Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
# future from within a script function with torch.jit.fork
with _profile() as prof:
with torch.autograd.profiler.record_function("foo") as rf:
ret = call_fork_with_profiling(rf.handle)
events = prof.function_events
function_event = get_function_event(events, "foo")
self.assertEqual(function_event.name, "foo")
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_async_function_wrong_return_type(self):
with self.assertRaisesRegex(
RuntimeError,
"Async functions must return an IValue of Future type, but got Tensor",
):
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size), async_wrong_type
)
@dist_init
def test_async_function_wrong_decorator_order(self):
        # @torch.jit.script complains about the undefined value rpc. The error is
        # shown below. The reason for not checking the error string is to avoid
        # coupling these RPC tests to the exact JIT error message, as we don't
        # place any restrictions on the error message here.
#
# RuntimeError:
# undefined value rpc:
# def async_wrong_decorator_order(to, x, y):
# # type: (str, Tensor, Tensor) -> Future[Tensor]
# return rpc.rpc_async(to, script_add, (x, y))
# ~~~ <--- HERE
with self.assertRaises(RuntimeError):
@torch.jit.script
@rpc.functions.async_execution
def async_wrong_decorator_order(
to: str, x: Tensor, y: Tensor
) -> Future[Tensor]:
return rpc.rpc_async(to, script_add, (x, y))
@dist_init
def test_async_function_remote(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(
dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_async_function_remote_multi(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
rrefs = []
for i in range(num):
rrefs.append(
rpc.remote(
dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2) * i)
)
)
for i in range(num):
self.assertEqual(rrefs[i].to_here(), torch.ones(2, 2) + i)
@dist_init
def test_async_function_wrong_return_type_remote(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size), async_wrong_type
)
with self.assertRaisesRegex(
RuntimeError,
"Async functions must return an IValue of Future type, but got Tensor",
):
rref.to_here()
| pytorch-master | torch/testing/_internal/distributed/rpc/jit/rpc_test.py |
from typing import Dict, Tuple
import torch
import torch.distributed.rpc as rpc
from torch import Tensor
from torch.distributed.rpc import RRef
from torch.testing._internal.dist_utils import (
dist_init,
worker_name,
wait_until_pending_futures_and_users_flushed
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
@torch.jit.script
def two_args_two_kwargs(
first_arg,
second_arg,
first_kwarg=torch.tensor([3, 3]),
second_kwarg=torch.tensor([4, 4]),
):
return first_arg + second_arg + first_kwarg + second_kwarg
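# Illustrative arithmetic (not executed here): with the default kwargs above,
# two_args_two_kwargs(torch.tensor([1, 1]), torch.tensor([2, 2])) evaluates to
# tensor([10, 10]) = [1, 1] + [2, 2] + [3, 3] + [4, 4]; the tests below override
# both kwargs with [2, 2] and [3, 3], which is why they expect tensor([8, 8]).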
@torch.jit.script
def script_rpc_async_call(
dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
@torch.jit.script
def rpc_async_call_with_timeout(
dst_worker_name: str,
args: Tuple[Tensor, Tensor],
kwargs: Dict[str, Tensor],
timeout: float,
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
ret = fut.wait()
return ret
@torch.jit.script
def rpc_async_call_with_timeout_future_ret(
dst_worker_name: str,
args: Tuple[Tensor, Tensor],
kwargs: Dict[str, Tensor],
timeout: float,
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
return fut
@torch.jit.script
def rpc_async_call_future_ret(
dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
return fut
@torch.jit.script
def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
return rref_var.to_here()
@torch.jit.script
def rref_to_here_with_timeout(rref_var: RRef[Tensor], timeout: float) -> Tensor:
return rref_var.to_here(timeout)
@torch.jit.script
def rpc_async_with_rref_arg(dst_worker_name: str, args: Tuple[RRef[Tensor]]) -> Tensor:
fut = rpc.rpc_async(dst_worker_name, rref_to_here, args)
ret = fut.wait()
return ret
class JitFaultyAgentRpcTest(RpcAgentTestFixture):
"""
Run tests for rpc_async in JIT under the faulty agent test fixture to test
arbitrary timeouts.
"""
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_timeout_in_torchscript_function(self):
# Call rpc_async + fut.wait() in torchscript function and ensure that
# timeout is raised.
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
expected_error = self.get_timeout_error_regex()
# Ensure that we get a timeout if we override the default timeout and
# the RPC takes longer to execute.
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0.5)
        # Ensure that we time out if we don't specify a timeout but the default
        # is shorter than the time the RPC takes to execute.
rpc._set_rpc_timeout(0.001)
with self.assertRaisesRegex(RuntimeError, expected_error):
script_rpc_async_call(
dst_worker_name, args, kwargs
)
# Ensure that we run to completion if zero timeout is specified.
ret = rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0)
self.assertEqual(ret, torch.tensor([8, 8]))
# reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_timeout_in_python(self):
# Ensures timeouts are raised if we call rpc_async from within a
# torchscript function, but wait on the future in python.
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
expected_error = self.get_timeout_error_regex()
fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0.5)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
        # Ensure a timeout if we don't specify one but the default is shorter
        # than the time the RPC takes to execute.
rpc._set_rpc_timeout(0.001)
fut = rpc_async_call_future_ret(dst_worker_name, args, kwargs)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if zero timeout is specified
fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0)
result = fut.wait()
self.assertEqual(result, torch.tensor([8, 8]))
# reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_remote_timeout_to_here_in_jit(self):
        # Test that calling to_here() in JIT will raise a timeout error if
        # rpc.remote failed.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call to_here() within a ScriptFunction and ensure it raises
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref_to_here(rref)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout_in_jit(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_to_here_with_timeout(rref, 0.01)
rref_to_here_with_timeout(rref, 100)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_rref_timeout_pickle_in_jit(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call RPC with RRef arg in JIT, which will go through JIT pickling and
# ensure error is raised.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc_async_with_rref_arg(dst_worker, (rref, ))
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_rref_timeout_pickle_script_func(self):
# Similar to above test, but calls python rpc with script function.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call RPC with script function that takes RRef, ensure timeout during pickling
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_sync(dst_worker, rref_to_here, args=(rref, ))
| pytorch-master | torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py |
pytorch-master | torch/testing/_internal/distributed/rpc/examples/__init__.py |
|
# If you need to modify this file to make this test pass, please also apply same edits accordingly to
# https://github.com/pytorch/examples/blob/master/distributed/rpc/rl/main.py
# and https://pytorch.org/tutorials/intermediate/rpc_tutorial.html
import numpy as np
from itertools import count
import torch
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributed.rpc import RRef, rpc_sync, rpc_async, remote
from torch.distributions import Categorical
from torch.testing._internal.dist_utils import dist_init, worker_name
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture
TOTAL_EPISODE_STEP = 5000
GAMMA = 0.1
SEED = 543
def _call_method(method, rref, *args, **kwargs):
r"""
a helper function to call a method on the given RRef
"""
return method(rref.local_value(), *args, **kwargs)
def _remote_method(method, rref, *args, **kwargs):
r"""
a helper function to run method on the owner of rref and fetch back the
result using RPC
"""
args = [method, rref] + list(args)
return rpc_sync(rref.owner(), _call_method, args=args, kwargs=kwargs)
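# Illustrative sketch (not executed here) of how the two helpers above are used
# further down in this file: an observer invokes a method on the object owned by
# `agent_rref`, e.g.
#   action = _remote_method(Agent.select_action, agent_rref, ob_id, state)
# which runs Agent.select_action(agent_rref.local_value(), ob_id, state) on the
# RRef's owner via rpc_sync and returns the result to the caller.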
class Policy(nn.Module):
r"""
Borrowing the ``Policy`` class from the Reinforcement Learning example.
Copying the code to make these two examples independent.
See https://github.com/pytorch/examples/tree/master/reinforcement_learning
"""
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.dropout = nn.Dropout(p=0.6)
self.affine2 = nn.Linear(128, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = self.affine1(x)
x = self.dropout(x)
x = F.relu(x)
action_scores = self.affine2(x)
return F.softmax(action_scores, dim=1)
class DummyEnv:
r"""
A dummy environment that implements the required subset of the OpenAI gym
interface. It exists only to avoid a dependency on gym for running the
tests in this file. It is designed to run for a set max number of iterations,
returning random states and rewards at each step.
"""
def __init__(self, state_dim=4, num_iters=10, reward_threshold=475.0):
self.state_dim = state_dim
self.num_iters = num_iters
self.iter = 0
self.reward_threshold = reward_threshold
def seed(self, manual_seed):
torch.manual_seed(manual_seed)
def reset(self):
self.iter = 0
return torch.randn(self.state_dim)
def step(self, action):
self.iter += 1
state = torch.randn(self.state_dim)
reward = torch.rand(1).item() * self.reward_threshold
done = self.iter >= self.num_iters
info = {}
return state, reward, done, info
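# Illustrative sketch (not executed here): DummyEnv follows the gym-style loop that
# Observer.run_episode below relies on, e.g.
#   env = DummyEnv()
#   state, done = env.reset(), False
#   while not done:
#       state, reward, done, _ = env.step(action)  # `action` chosen by the agent
# and the episode always terminates after `num_iters` steps.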
class Observer:
r"""
An observer has exclusive access to its own environment. Each observer
    captures the state from its environment, and sends the state to the agent to
select an action. Then, the observer applies the action to its environment
and reports the reward to the agent.
"""
def __init__(self):
self.id = rpc.get_worker_info().id
self.env = DummyEnv()
self.env.seed(SEED)
def run_episode(self, agent_rref, n_steps):
r"""
Run one episode of n_steps.
Arguments:
agent_rref (RRef): an RRef referencing the agent object.
n_steps (int): number of steps in this episode
"""
state, ep_reward = self.env.reset(), 0
for step in range(n_steps):
# send the state to the agent to get an action
action = _remote_method(Agent.select_action, agent_rref, self.id, state)
# apply the action to the environment, and get the reward
state, reward, done, _ = self.env.step(action)
            # report the reward to the agent for training purposes
_remote_method(Agent.report_reward, agent_rref, self.id, reward)
if done:
break
class Agent:
def __init__(self, world_size):
self.ob_rrefs = []
self.agent_rref = RRef(self)
self.rewards = {}
self.saved_log_probs = {}
self.policy = Policy()
self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
self.eps = np.finfo(np.float32).eps.item()
self.running_reward = 0
self.reward_threshold = DummyEnv().reward_threshold
for ob_rank in range(1, world_size):
ob_info = rpc.get_worker_info(worker_name(ob_rank))
self.ob_rrefs.append(remote(ob_info, Observer))
self.rewards[ob_info.id] = []
self.saved_log_probs[ob_info.id] = []
def select_action(self, ob_id, state):
r"""
This function is mostly borrowed from the Reinforcement Learning example.
See https://github.com/pytorch/examples/tree/master/reinforcement_learning
The main difference is that instead of keeping all probs in one list,
the agent keeps probs in a dictionary, one key per observer.
NB: no need to enforce thread-safety here as GIL will serialize
executions.
"""
probs = self.policy(state.unsqueeze(0))
m = Categorical(probs)
action = m.sample()
self.saved_log_probs[ob_id].append(m.log_prob(action))
return action.item()
def report_reward(self, ob_id, reward):
r"""
Observers call this function to report rewards.
"""
self.rewards[ob_id].append(reward)
def run_episode(self, n_steps=0):
r"""
Run one episode. The agent will tell each observer to run n_steps.
"""
futs = []
for ob_rref in self.ob_rrefs:
# make async RPC to kick off an episode on all observers
futs.append(
rpc_async(
ob_rref.owner(),
_call_method,
args=(Observer.run_episode, ob_rref, self.agent_rref, n_steps)
)
)
        # wait until all observers have finished this episode
for fut in futs:
fut.wait()
def finish_episode(self):
r"""
This function is mostly borrowed from the Reinforcement Learning example.
See https://github.com/pytorch/examples/tree/master/reinforcement_learning
The main difference is that it joins all probs and rewards from
different observers into one list, and uses the minimum observer rewards
as the reward of the current episode.
"""
# joins probs and rewards from different observers into lists
R, probs, rewards = 0, [], []
for ob_id in self.rewards:
probs.extend(self.saved_log_probs[ob_id])
rewards.extend(self.rewards[ob_id])
# use the minimum observer reward to calculate the running reward
min_reward = min([sum(self.rewards[ob_id]) for ob_id in self.rewards])
self.running_reward = 0.05 * min_reward + (1 - 0.05) * self.running_reward
# clear saved probs and rewards
for ob_id in self.rewards:
self.rewards[ob_id] = []
self.saved_log_probs[ob_id] = []
policy_loss, returns = [], []
for r in rewards[::-1]:
R = r + GAMMA * R
returns.insert(0, R)
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + self.eps)
for log_prob, R in zip(probs, returns):
policy_loss.append(-log_prob * R)
self.optimizer.zero_grad()
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
self.optimizer.step()
return min_reward
def run_agent(agent, n_steps):
for i_episode in count(1):
agent.run_episode(n_steps=n_steps)
last_reward = agent.finish_episode()
if agent.running_reward > agent.reward_threshold:
print("Solved! Running reward is now {}!".format(agent.running_reward))
break
class ReinforcementLearningRpcTest(RpcAgentTestFixture):
@dist_init(setup_rpc=False)
def test_rl_rpc(self):
if self.rank == 0:
# Rank 0 is the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
agent = Agent(self.world_size)
run_agent(agent, n_steps=int(TOTAL_EPISODE_STEP / (self.world_size - 1)))
# Ensure training was run. We don't really care about whether the task was learned,
# since the purpose of the test is to check the API calls.
self.assertGreater(agent.running_reward, 0.0)
else:
# Other ranks are observers that passively wait for instructions from the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
| pytorch-master | torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py |
# If you need to modify this file to make this test pass, please also apply same edits accordingly to
# https://github.com/pytorch/examples/blob/master/distributed/rpc/batch/parameter_server.py
# and https://pytorch.org/tutorials/intermediate/rpc_async_execution.html#batch-updating-parameter-server
import threading
from datetime import datetime
from time import perf_counter
import torch
import torch.distributed.rpc as rpc
import torch.nn as nn
from torch import optim
from torch.testing._internal.dist_utils import (
dist_init,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture
batch_size = 20
in_features = 100
out_features = 30
num_batches = 4
def timed_log(text):
print(f"{datetime.now().strftime('%H:%M:%S')} {text}")
class BatchUpdateParameterServer(object):
def __init__(self, batch_update_size):
self.model = nn.Linear(in_features, out_features)
self.lock = threading.Lock()
self.future_model = torch.futures.Future()
self.batch_update_size = batch_update_size
self.curr_update_size = 0
self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)
for p in self.model.parameters():
p.grad = torch.zeros_like(p)
def get_model(self):
return self.model
@staticmethod
@rpc.functions.async_execution
def update_and_fetch_model(ps_rref, grads):
self = ps_rref.local_value()
for p, g in zip(self.model.parameters(), grads):
p.grad += g
with self.lock:
timed_log(f"PS got {self.curr_update_size}/{self.batch_update_size} updates")
self.curr_update_size += 1
fut = self.future_model
if self.curr_update_size >= self.batch_update_size:
for p in self.model.parameters():
p.grad /= self.batch_update_size
self.curr_update_size = 0
self.optimizer.step()
self.optimizer.zero_grad()
fut.set_result(self.model)
timed_log("PS updated model")
self.future_model = torch.futures.Future()
return fut
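# Illustrative sketch (not executed here): because update_and_fetch_model is
# decorated with @rpc.functions.async_execution, it returns a Future and the RPC
# response is only sent once fut.set_result(...) fires, i.e. after the last
# trainer in the batch has contributed its gradients. Trainers invoke it as
#   m = rpc.rpc_sync(ps_rref.owner(),
#                    BatchUpdateParameterServer.update_and_fetch_model,
#                    args=(ps_rref, grads))
# as shown in Trainer.train below.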
class Trainer(object):
def __init__(self, ps_rref):
self.ps_rref = ps_rref
self.loss_fn = nn.L1Loss()
def get_next_batch(self):
for _ in range(num_batches):
inputs = torch.randn(batch_size, in_features)
labels = torch.zeros(batch_size, out_features)
yield inputs, labels
def train(self):
name = rpc.get_worker_info().name
m = self.ps_rref.rpc_sync().get_model()
for inputs, labels in self.get_next_batch():
timed_log(f"{name} processing one batch")
self.loss_fn(m(inputs), labels).backward()
timed_log(f"{name} reporting grads")
m = rpc.rpc_sync(
self.ps_rref.owner(),
BatchUpdateParameterServer.update_and_fetch_model,
args=(self.ps_rref, [p.grad for p in m.cpu().parameters()]),
)
timed_log(f"{name} got updated model")
def run_trainer(ps_rref):
trainer = Trainer(ps_rref)
trainer.train()
def run_ps(trainers):
timed_log("Start training")
start = perf_counter()
ps_rref = rpc.RRef(BatchUpdateParameterServer(len(trainers)))
futs = []
for trainer in trainers:
futs.append(
rpc.rpc_async(trainer, run_trainer, args=(ps_rref,))
)
torch.futures.wait_all(futs)
stop = perf_counter()
timed_log("Finish training")
timed_log(f"Time spent training: {stop-start}s")
class ParameterServerTest(RpcAgentTestFixture):
@dist_init(setup_rpc=False)
def test_batch_updating_parameter_server(self):
if self.rank != 0:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
else:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
run_ps([f"{worker_name(r)}" for r in range(1, self.world_size)])
rpc.shutdown()
| pytorch-master | torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py |
pytorch-master | torch/testing/_internal/codegen/__init__.py |
|
import torch
import numpy as np
import argparse
from typing import Dict
# debug print
DEBUG_PRINT = False
################################################################################
# configuration for random tests setup
################################################################################
# maximum number of tensors as inputs
MAX_TENSOR = 6
# maximum tensor rank
MAX_TENSOR_DIM = 5
# maximum tensor size
MAX_TENSOR_SIZE = 2**20
# use a size 1 tensor for debug
DEBUG_TENSOR = False
# tensor device
DEVICE = "cuda"
# data type for tensors
DTYPE = torch.float
# this factor roughly controls the depth of the generated graph
GRAPH_FACTOR = 2
################################################################################
# helper functions
################################################################################
class WrongResultException(Exception):
pass
# randomly reduce tensor_shape while preserving it to be broadcast-compatible
# two things are done here:
# 1. trim starting dimensions;
# 2. randomly clamp remaining dimension to be size 1;
def get_broadcast_compatible_shape(tensor_shape):
max_dim = len(tensor_shape)
num_b_dims = np.random.randint(0, max_dim + 1)
trim_head = np.random.randint(0, min(num_b_dims + 1, max_dim))
shape = tensor_shape[trim_head:max_dim]
for i in np.random.choice(range(max_dim - trim_head),
num_b_dims - trim_head,
replace=False):
shape[i] = 1
return shape
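# Illustrative sketch (hypothetical values, not executed here): for an input shape
# like [4, 8, 16] this might return [8, 16], [1, 16] or [4, 1, 16] depending on the
# random draws; the result always broadcasts against the original shape because
# dimensions are only trimmed from the front or clamped to size 1.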
# generate random topology using seed and also flags
def random_topology_test(seed, *inp_tensor_list):
np.random.seed(int(seed.numpy().tolist()))
tensor_list = [*inp_tensor_list]
num_tensor = len(tensor_list)
# randomly add available constant value
num_const = np.random.randint(0, num_tensor + 1)
const_list = np.random.random(num_const)
if DEBUG_PRINT:
for const_item in const_list:
print("----- real number {:.10f}", const_item)
    # we require all tensors to be in a single dependency set
def get_root(x, dependency_map):
if x in dependency_map:
return get_root(dependency_map[x], dependency_map)
else:
return x
d_map: Dict[int, int] = {}
num_sets = num_tensor
candidate = list(range(num_tensor))
unary_operations = [torch.sigmoid, torch.relu]
binary_operations = [torch.add, torch.sub, torch.mul]
u_op_size = len(unary_operations)
b_op_size = len(binary_operations)
num_operations = np.random.randint(num_sets - 1,
num_sets * GRAPH_FACTOR)
ret_list = []
while num_operations >= 0 or num_sets > 1:
        # we start off by randomly picking a candidate and an operation
index = np.random.randint(0, len(candidate))
op_index = np.random.randint(0, u_op_size + b_op_size)
lh_index = candidate[index]
rh_index = None
out_tensor = None
if DEBUG_PRINT:
print("iteration {0}, num_sets{1}, candidates {2}, tensor_list {3}, lh_index {4}, op_index {5}".format(
num_operations, num_sets, candidate, len(tensor_list), lh_index, op_index))
if num_operations >= 0:
num_operations -= 1
if op_index < u_op_size:
# unary operation, we just apply a random operation on candidate
out_tensor = unary_operations[op_index](tensor_list[lh_index])
else:
# binary operation, we randomly choose the other operand:
# 1. tensor on tensor operation -> rh_index
# 2. tensor on const operation
# we are not restricted to candidate tensor any more.
op_2_index = np.random.randint(0, len(tensor_list) + num_const)
if op_2_index < len(tensor_list):
if op_2_index == lh_index:
                        # if we are unlucky and picked the candidate again, just
                        # try another tensor
op_2_index = (op_2_index + 1) % len(tensor_list)
# [if rh_index: create binary operator output tensor]
rh_index = op_2_index
else:
left = tensor_list[lh_index]
right = const_list[op_2_index - len(tensor_list)]
# if np.random.randint(0, 2) > 0:
# left = const_list[op_2_index - len(tensor_list)]
# right = tensor_list[lh_index]
out_tensor = binary_operations[op_index - u_op_size](left, right)
if DEBUG_PRINT:
print("binary, op_2_index {0}, rh_index ?{1}".format(op_2_index, rh_index))
else:
            # binary operation, we just randomly pick two candidates.
            # this is not the most efficient way to close the dependency sets, as we
            # could have two candidates that are actually connected
cand_index = np.random.randint(0, len(candidate))
if cand_index == index:
cand_index = (cand_index + 1) % len(candidate)
# [if rh_index: create binary operator output tensor]
rh_index = candidate[cand_index]
if DEBUG_PRINT:
print("binary rh_index ?{0}".format(rh_index))
        # updating the candidate should happen before we remove rh_index
candidate[index] = len(tensor_list)
lh_root = get_root(lh_index, d_map)
# [if rh_index: create binary operator output tensor]
if rh_index is not None:
out_tensor = binary_operations[op_index - u_op_size](
tensor_list[lh_index],
tensor_list[rh_index])
# remove rh_index from candidate if it is used
if rh_index in candidate:
# python remove(val), not by index
candidate.remove(rh_index)
# check if we join dependency sets:
rh_root = get_root(rh_index, d_map)
if lh_root != rh_root:
num_sets -= 1
                # update dependency, no need to update d_map[rh_root] when
                # they already point to the same root
d_map[rh_root] = len(tensor_list)
# no joining, just update dependency
d_map[lh_root] = len(tensor_list)
        # update candidate; this avoids applying an identical operation on
        # the same tensor(s)
tensor_list.append(out_tensor)
# TODO: we should mark
# union(random_sample(tensor_list[num_tensor:]), candidate) as outputs.
# which would ensure we have no dead branch and a connected computation
# graph. However, it won't work easily if we have broadcast.
# I have disabled broadcast for now to focus on topology test.
for ind in candidate:
ret_list.append(tensor_list[ind])
out_list = np.random.choice(
range(num_tensor, len(tensor_list)),
np.random.randint(0, len(tensor_list) - num_tensor),
False)
for ind in out_list:
if ind not in candidate:
ret_list.append(tensor_list[ind])
if DEBUG_PRINT:
print("ended with tensor_list: {0}".format(len(tensor_list)))
return tuple(ret_list)
def prepareInputTensorsToRandomTopoTest(seed,
max_tensor_num,
max_tensor_dim,
max_tensor_size,
debug_tensor,
device,
dtype):
# set seed to numpy as well as torch
np.random.seed(seed)
torch.manual_seed(np.random.randint(0, seed))
# seed to pass to torch.jit.trace
seed_tensor = torch.tensor(np.random.randint(0, seed))
# random number of input tensors
num_tensor = np.random.randint(1, max_tensor_num)
# prepare randomized tensor shape
tensor_dim = np.random.randint(1, max_tensor_dim)
tensor_shape = []
numel = 1
if debug_tensor:
tensor_shape.append(1)
else:
for i in range(tensor_dim):
size_i = np.random.randint(1, int(max_tensor_size / numel / (2**(tensor_dim - i))))
size_i = min(size_i, 128 + size_i % 128)
tensor_shape.insert(0, size_i)
numel *= size_i
if DEBUG_PRINT:
print("output tensor shape: ", tensor_shape)
# vvv BROADCASTING vvv
# select tensors to be broadcasted
# TODO: enable broadcasting when we fully support it.
# num_broadcasted_tensors = np.random.randint(0, num_tensor)
num_broadcasted_tensors = np.random.randint(0, 1)
# we leave at least one tensor not broadcasted
broadcasted_tensors_indices = np.random.choice(torch.arange(num_tensor),
num_broadcasted_tensors,
replace=False)
# vvv PREPARING TENSORS vvv
tensor_list = []
for i in range(num_tensor):
if i in broadcasted_tensors_indices:
# get broadcast-compatible shape:
# Note that we are not playing with stride here, as stride doesn't affect
# codegen meaningfully.
compatible_shape = get_broadcast_compatible_shape(tensor_shape)
tensor_list.append(torch.randn(compatible_shape, device=device, dtype=dtype) * 100)
else:
tensor_list.append(torch.randn(tensor_shape, device=device, dtype=dtype) * 100)
return seed_tensor, tensor_list
def reproString(current_seed, args):
repro_str = "python {0}".format(__file__)
if args.cuda_fuser:
repro_str += " --cuda_fuser"
if args.legacy_fuser:
repro_str += " --legacy_fuser"
if args.profiling_executor:
repro_str += " --profiling_executor"
if args.fp16:
repro_str += " --fp16"
if args.cpu:
repro_str += " --cpu"
repro_str += " --max_num_tensor {0} --max_tensor_dim {1} --max_tensor_size {2}"\
" --depth_factor {3} --seed {4} --repro_run".format(
args.max_num_tensor, args.max_tensor_dim, args.max_tensor_size,
args.depth_factor, current_seed)
return repro_str
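# Illustrative sketch (hypothetical seed value, not executed here): with the default
# flags, reproString produces a command along the lines of
#   python random_topo_test.py --cuda_fuser --max_num_tensor 6 --max_tensor_dim 5 --max_tensor_size 1048576 --depth_factor 2 --seed 12345 --repro_run
# which re-runs exactly the topology that failed.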
################################################################################
# global seed to repro the test
################################################################################
def runDefaultTestWithSeed(seed):
# prepare input tensors
seed_tensor, tensor_list = prepareInputTensorsToRandomTopoTest(seed,
MAX_TENSOR,
MAX_TENSOR_DIM,
MAX_TENSOR_SIZE,
DEBUG_TENSOR,
DEVICE,
DTYPE)
o = random_topology_test(seed_tensor, *tensor_list)
traced_model = torch.jit.trace(random_topology_test, (seed_tensor, *tensor_list))
jit_o = traced_model(seed_tensor, *tensor_list) # possible profiling run
jit_o = traced_model(seed_tensor, *tensor_list)
validate_o = zip(o, jit_o)
for oo, jit_oo in validate_o:
if not oo.allclose(jit_oo, atol=1e-5, equal_nan=True):
return False
return True
def runTest(seed, args):
# prepare input tensors
seed_tensor, tensor_list = prepareInputTensorsToRandomTopoTest(seed,
args.max_num_tensor,
args.max_tensor_dim,
args.max_tensor_size,
args.debug_tensor,
"cuda" if not args.cpu else "cpu",
torch.float32 if not args.fp16 else torch.float16)
# vvv run random generated topo test in eager vvv
try:
if DEBUG_PRINT:
print("seed tensor: ", seed_tensor)
o = random_topology_test(seed_tensor, *tensor_list)
if DEBUG_PRINT:
for out in o:
print("val size: ", out.size())
except Exception as err:
raise Exception("Testing script failure with error message, repro by running:\n"
f"\t{reproString(seed, args)}") from err
try:
traced_model = torch.jit.trace(random_topology_test, (seed_tensor, *tensor_list))
if DEBUG_PRINT:
print("original graph: ", traced_model.graph)
jit_o = traced_model(seed_tensor, *tensor_list) # possible profiling run
jit_o = traced_model(seed_tensor, *tensor_list)
if DEBUG_PRINT:
print("optimized graph: ", traced_model.graph_for(seed_tensor, *tensor_list))
validate_o = zip(o, jit_o)
for oo, jit_oo in validate_o:
if not oo.allclose(jit_oo, equal_nan=True):
print("eager output: ", oo)
print("jit output: ", jit_oo)
print("diff ", jit_oo - oo)
raise WrongResultException()
except WrongResultException as err:
raise Exception("cuda fuser gives wrong results, repro by running:\n"
f"\t{reproString(seed, args)}") from err
except Exception as err:
raise Exception("something in cuda fuser went wrong, repro by running:\n"
f"\t{reproString(seed, args)}") from err
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--cuda_fuser", action='store_true', default=True)
parser.add_argument("--legacy_fuser", action='store_true', default=False)
parser.add_argument("--profiling_executor", action='store_true', default=False)
parser.add_argument("--fp16", action='store_true', default=False)
parser.add_argument("--cpu", action='store_true', default=False)
parser.add_argument("--debug_print", action='store_true', default=False)
parser.add_argument("--debug_tensor", action='store_true', default=False)
parser.add_argument("--max_num_tensor", default=MAX_TENSOR, type=int)
parser.add_argument("--max_tensor_dim", default=MAX_TENSOR_DIM, type=int)
parser.add_argument("--max_tensor_size", default=MAX_TENSOR_SIZE, type=int)
parser.add_argument("--depth_factor", default=GRAPH_FACTOR, type=int)
parser.add_argument("--seed", default=45589, type=int)
group = parser.add_mutually_exclusive_group()
group.add_argument("--iterations", default=4, type=int)
group.add_argument("--repro_run", action='store_true', default=False)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
# Register CUDA fuser
if args.cuda_fuser:
torch._C._jit_set_nvfuser_enabled(True)
# Turn off legacy fuser
if not args.legacy_fuser:
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
# Turn off profiling executor
if not args.profiling_executor:
torch._C._jit_set_profiling_executor(False)
torch._C._get_graph_executor_optimize(False)
    # this factor roughly controls the depth of the generated graph
GRAPH_FACTOR = args.depth_factor
# debug print
DEBUG_PRINT = args.debug_print
if args.repro_run:
runTest(args.seed, args)
else:
np.random.seed(args.seed)
failing_repros = []
for seed in np.random.randint(0, args.seed, args.iterations):
try:
runTest(seed, args)
except Exception as e:
failing_repros.append(str(e))
if len(failing_repros) == 0:
print("test passed")
else:
print("{0} out of {1} tests failed;".format(
len(failing_repros), args.iterations))
print("To repro failing tests, run\n")
for repro in failing_repros:
print(repro)
| pytorch-master | torch/testing/_internal/codegen/random_topo_test.py |
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 20)
| pytorch-master | torch/testing/_internal/data/network1.py |
pytorch-master | torch/testing/_internal/data/__init__.py |
|
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 20)
self.relu = nn.ReLU()
| pytorch-master | torch/testing/_internal/data/network2.py |
import torch
import functools
import warnings
from typing import Any, Optional
from torch.types import _dtype
__all__ = ['autocast_decorator', 'autocast']
def autocast_decorator(autocast_instance, func):
@functools.wraps(func)
def decorate_autocast(*args, **kwargs):
with autocast_instance:
return func(*args, **kwargs)
decorate_autocast.__script_unsupported = '@autocast() decorator is not supported in script mode' # type: ignore[attr-defined]
return decorate_autocast
class autocast(object):
r"""
Instances of :class:`autocast` serve as context managers or decorators that
allow regions of your script to run in mixed precision.
In these regions, ops run in an op-specific dtype chosen by autocast
to improve performance while maintaining accuracy.
See the :ref:`Autocast Op Reference<autocast-op-reference>` for details.
When entering an autocast-enabled region, Tensors may be any type.
You should not call ``half()`` or ``bfloat16()`` on your model(s) or inputs when using autocasting.
:class:`autocast` should wrap only the forward pass(es) of your network, including the loss
computation(s). Backward passes under autocast are not recommended.
Backward ops run in the same type that autocast used for corresponding forward ops.
Example for CUDA Devices::
# Creates model and optimizer in default precision
model = Net().cuda()
optimizer = optim.SGD(model.parameters(), ...)
for input, target in data:
optimizer.zero_grad()
# Enables autocasting for the forward pass (model + loss)
with autocast():
output = model(input)
loss = loss_fn(output, target)
# Exits the context manager before backward()
loss.backward()
optimizer.step()
See the :ref:`CUDA Automatic Mixed Precision examples<amp-examples>` for usage (along with gradient scaling)
in more complex scenarios (e.g., gradient penalty, multiple models/losses, custom autograd functions).
:class:`autocast` can also be used as a decorator, e.g., on the ``forward`` method of your model::
class AutocastModel(nn.Module):
...
            @autocast(device_type="cuda")
def forward(self, input):
...
Floating-point Tensors produced in an autocast-enabled region may be ``float16``.
After returning to an autocast-disabled region, using them with floating-point
Tensors of different dtypes may cause type mismatch errors. If so, cast the Tensor(s)
produced in the autocast region back to ``float32`` (or other dtype if desired).
If a Tensor from the autocast region is already ``float32``, the cast is a no-op,
and incurs no additional overhead.
CUDA Example::
# Creates some tensors in default dtype (here assumed to be float32)
a_float32 = torch.rand((8, 8), device="cuda")
b_float32 = torch.rand((8, 8), device="cuda")
c_float32 = torch.rand((8, 8), device="cuda")
d_float32 = torch.rand((8, 8), device="cuda")
with autocast():
# torch.mm is on autocast's list of ops that should run in float16.
# Inputs are float32, but the op runs in float16 and produces float16 output.
# No manual casts are required.
e_float16 = torch.mm(a_float32, b_float32)
# Also handles mixed input types
f_float16 = torch.mm(d_float32, e_float16)
# After exiting autocast, calls f_float16.float() to use with d_float32
g_float32 = torch.mm(d_float32, f_float16.float())
CPU Training Example::
# Creates model and optimizer in default precision
model = Net()
optimizer = optim.SGD(model.parameters(), ...)
for epoch in epochs:
for input, target in data:
optimizer.zero_grad()
# Runs the forward pass with autocasting.
with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
output = model(input)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
CPU Inference Example::
# Creates model in default precision
model = Net().eval()
with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
for input in data:
# Runs the forward pass with autocasting.
output = model(input)
CPU Inference Example with Jit Trace::
class TestModel(nn.Module):
def __init__(self, input_size, num_classes):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(input_size, num_classes)
def forward(self, x):
return self.fc1(x)
input_size = 2
num_classes = 2
model = TestModel(input_size, num_classes).eval()
        # For now, we suggest disabling the JIT Autocast Pass,
# As the issue: https://github.com/pytorch/pytorch/issues/75956
torch._C._jit_set_autocast_mode(False)
with torch.cpu.amp.autocast(cache_enabled=False):
model = torch.jit.trace(model, torch.randn(1, input_size))
model = torch.jit.freeze(model)
# Models Run
for _ in range(3):
model(torch.randn(1, input_size))
Type mismatch errors *in* an autocast-enabled region are a bug; if this is what you observe,
please file an issue.
``autocast(enabled=False)`` subregions can be nested in autocast-enabled regions.
Locally disabling autocast can be useful, for example, if you want to force a subregion
to run in a particular ``dtype``. Disabling autocast gives you explicit control over
the execution type. In the subregion, inputs from the surrounding region
should be cast to ``dtype`` before use::
# Creates some tensors in default dtype (here assumed to be float32)
a_float32 = torch.rand((8, 8), device="cuda")
b_float32 = torch.rand((8, 8), device="cuda")
c_float32 = torch.rand((8, 8), device="cuda")
d_float32 = torch.rand((8, 8), device="cuda")
with autocast():
e_float16 = torch.mm(a_float32, b_float32)
with autocast(enabled=False):
# Calls e_float16.float() to ensure float32 execution
# (necessary because e_float16 was created in an autocasted region)
f_float32 = torch.mm(c_float32, e_float16.float())
# No manual casts are required when re-entering the autocast-enabled region.
# torch.mm again runs in float16 and produces float16 output, regardless of input types.
g_float16 = torch.mm(d_float32, f_float32)
The autocast state is thread-local. If you want it enabled in a new thread, the context manager or decorator
must be invoked in that thread. This affects :class:`torch.nn.DataParallel` and
:class:`torch.nn.parallel.DistributedDataParallel` when used with more than one GPU per process
(see :ref:`Working with Multiple GPUs<amp-multigpu>`).
Args:
        device_type(str, required): Device type to use ('cuda' or 'cpu')
        enabled(bool, optional): Whether autocasting should be enabled in the region.
            Default: ``True``
        dtype(torch_dtype, optional): Data type to use (torch.float16 or torch.bfloat16).
cache_enabled(bool, optional): Whether the weight cache inside autocast should be enabled.
Default: ``True``
"""
def __init__(self, device_type : str,
dtype : Optional[_dtype] = None,
enabled : bool = True,
cache_enabled : Optional[bool] = None):
if torch._jit_internal.is_scripting():
self._enabled = enabled
self.device = device_type
self.fast_dtype = dtype
# TODO: support get_autocast_gpu/cpu_dtype
assert dtype is not None
return
self.device = device_type
if self.device == 'cuda':
self.fast_dtype = torch.get_autocast_gpu_dtype()
elif self.device == 'cpu':
self.fast_dtype = torch.get_autocast_cpu_dtype()
elif self.device == 'xpu':
self.fast_dtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined]
else:
            raise RuntimeError('User specified autocast device_type must be \'cuda\', \'cpu\' or \'xpu\'')
self._cache_enabled = torch.is_autocast_cache_enabled()
if enabled and torch.cuda.amp.common.amp_definitely_not_available() and self.device == 'cuda':
warnings.warn('User provided device_type of \'cuda\', but CUDA is not available. Disabling')
enabled = False
if dtype is not None:
self.fast_dtype = dtype
if cache_enabled is not None:
self._cache_enabled = cache_enabled
if self.device == 'cpu':
supported_dtype = [torch.bfloat16]
if self.fast_dtype not in supported_dtype:
error_message = 'In CPU autocast, but the target dtype is not supported. Disabling autocast.\n'
error_message += 'CPU Autocast only supports dtype of torch.bfloat16 currently.'
warnings.warn(error_message)
enabled = False
if self.device == 'xpu':
supported_dtype = [torch.bfloat16, torch.float16]
if self.fast_dtype not in supported_dtype:
error_message = 'In XPU autocast, but the target dtype is not supported. Disabling autocast.\n'
                error_message += 'XPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently.'
warnings.warn(error_message)
enabled = False
if self.device == 'cuda':
if self.fast_dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
raise RuntimeError('Current CUDA Device does not support bfloat16. Please switch dtype to float16.')
self._enabled = enabled
def __enter__(self):
if torch._jit_internal.is_scripting():
assert self.fast_dtype is not None
return self
self.prev_cache_enabled = torch.is_autocast_cache_enabled()
if self.device == 'cpu':
self.prev = torch.is_autocast_cpu_enabled()
self.prev_fastdtype = torch.get_autocast_cpu_dtype()
torch.set_autocast_cpu_enabled(self._enabled)
torch.set_autocast_cpu_dtype(self.fast_dtype) # type: ignore[arg-type]
torch.autocast_increment_nesting()
elif self.device == 'xpu':
self.prev = torch.xpu.is_autocast_xpu_enabled() # type: ignore[attr-defined]
self.prev_fastdtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined]
torch.xpu.set_autocast_xpu_enabled(self._enabled) # type: ignore[attr-defined]
torch.xpu.set_autocast_xpu_dtype(self.fast_dtype) # type: ignore[attr-defined]
torch.autocast_increment_nesting()
else:
self.prev = torch.is_autocast_enabled()
self.prev_fastdtype = torch.get_autocast_gpu_dtype()
torch.set_autocast_gpu_dtype(self.fast_dtype) # type: ignore[arg-type]
torch.set_autocast_enabled(self._enabled)
torch.autocast_increment_nesting()
torch.set_autocast_cache_enabled(self._cache_enabled)
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override]
if torch._jit_internal.is_scripting():
return
# Drop the cache when we exit to a nesting level that's outside any instance of autocast.
if self.device == 'cpu':
if torch.autocast_decrement_nesting() == 0:
torch.clear_autocast_cache()
torch.set_autocast_cpu_enabled(self.prev)
torch.set_autocast_cpu_dtype(self.prev_fastdtype)
elif self.device == 'xpu':
if torch.autocast_decrement_nesting() == 0:
torch.clear_autocast_cache()
torch.xpu.set_autocast_xpu_enabled(self.prev) # type: ignore[attr-defined]
torch.xpu.set_autocast_xpu_dtype(self.prev_fastdtype) # type: ignore[attr-defined]
else:
if torch.autocast_decrement_nesting() == 0:
torch.clear_autocast_cache()
torch.set_autocast_enabled(self.prev)
torch.set_autocast_gpu_dtype(self.prev_fastdtype)
torch.set_autocast_cache_enabled(self.prev_cache_enabled)
return False
def __call__(self, func):
if torch._jit_internal.is_scripting():
return func
return autocast_decorator(self, func)
| pytorch-master | torch/amp/autocast_mode.py |
from .autocast_mode import autocast
| pytorch-master | torch/amp/__init__.py |
import torch
from typing import Union
class _InsertPoint(object):
def __init__(self, insert_point_graph: torch._C.Graph, insert_point: Union[torch._C.Node, torch._C.Block]):
self.insert_point = insert_point
self.g = insert_point_graph
self.guard = None
def __enter__(self):
self.prev_insert_point = self.g.insertPoint()
self.g.setInsertPoint(self.insert_point)
def __exit__(self, *args):
self.g.setInsertPoint(self.prev_insert_point)
def insert_point_guard(self, insert_point: Union[torch._C.Node, torch._C.Block]):
return _InsertPoint(self, insert_point)
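# Illustrative sketch (an assumption: `insert_point_guard` above appears intended to
# be bound as a method on torch._C.Graph, which is why it takes `self`); not
# executed here:
#   with graph.insert_point_guard(some_node):
#       ...  # nodes created here are inserted at `some_node`
#   # on exit, the previous insertion point is restored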
| pytorch-master | torch/jit/_ir_utils.py |
import torch
import inspect
import typing
import pathlib
import sys
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
# Checks whether a class is defined in `torch.*` modules
def is_torch_native_class(cls):
if not hasattr(cls, '__module__'):
return False
parent_modules = cls.__module__.split('.')
if not parent_modules:
return False
root_module = sys.modules.get(parent_modules[0])
return root_module is torch
def get_type(type):
"""
    Helper function that converts the given type to a TorchScript-acceptable format.
"""
if isinstance(type, str):
return type
elif inspect.getmodule(type) == typing:
# If the type is a type imported from typing
# like Tuple, List, Dict then replace `typing.`
# with a null string. This needs to be done since
# typing.List is not accepted by TorchScript.
type_to_string = str(type)
return type_to_string.replace(type.__module__ + '.', '')
elif is_torch_native_class(type):
# If the type is a subtype of torch module, then TorchScript expects a fully qualified name
# for the type which is obtained by combining the module name and type name.
return type.__module__ + '.' + type.__name__
else:
# For all other types use the name for the type.
return type.__name__
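# Illustrative sketch (not executed here; MyClass is a placeholder user class):
#   get_type('int')             -> 'int'           (strings are returned unchanged)
#   get_type(typing.List[int])  -> 'List[int]'     (the 'typing.' prefix is dropped)
#   get_type(torch.Tensor)      -> 'torch.Tensor'  (torch-native classes keep their module)
#   get_type(MyClass)           -> 'MyClass'       (anything else uses the bare class name)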
def get_optional_of_element_type(types):
"""
    Helper function that extracts the element type to be annotated as Optional
    from the list of consolidated types and returns `Optional[element type]`.
    TODO: Remove this check once Union support lands.
"""
elem_type = types[1] if type(None) == types[0] else types[0]
elem_type = get_type(elem_type)
# Optional type is internally converted to Union[type, NoneType], which
# is not supported yet in TorchScript. Hence, representing the optional type as string.
return 'Optional[' + elem_type + ']'
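# Illustrative sketch (not executed here): if MonkeyType recorded an argument as
# both int and NoneType, i.e. types == [type(None), int] (or the reverse order),
# the helper above returns the string 'Optional[int]'.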
def get_qualified_name(func):
return func.__qualname__
if _IS_MONKEYTYPE_INSTALLED:
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
"""A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
class JitTypeTraceStore(CallTraceStore):
def __init__(self):
super().__init__()
# A dictionary keeping all collected CallTrace
# key is fully qualified name of called function
# value is list of all CallTrace
self.trace_records: Dict[str, list] = defaultdict(list)
def add(self, traces: Iterable[CallTrace]):
for t in traces:
qualified_name = get_qualified_name(t.func)
self.trace_records[qualified_name].append(t)
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
# Analyze the types for the given module
# and create a dictionary of all the types
# for arguments.
records = self.trace_records[qualified_name]
all_args = defaultdict(set)
for record in records:
for arg, arg_type in record.arg_types.items():
all_args[arg].add(arg_type)
return all_args
def consolidate_types(self, qualified_name: str) -> Dict:
all_args = self.analyze(qualified_name)
            # If an argument has more than one recorded type,
            # consolidate its entry to the type `Any`.
for arg, types in all_args.items():
types = list(types)
type_length = len(types)
if type_length == 2 and type(None) in types:
                    # TODO: Remove this check once Union support in TorchScript lands.
all_args[arg] = get_optional_of_element_type(types)
elif type_length > 1:
all_args[arg] = 'Any'
elif type_length == 1:
all_args[arg] = get_type(types[0])
return all_args
def get_args_types(self, qualified_name: str) -> Dict:
return self.consolidate_types(qualified_name)
class JitTypeTraceConfig(monkeytype.config.Config):
def __init__(self, s: JitTypeTraceStore):
super().__init__()
self.s = s
def trace_logger(self) -> JitTypeTraceStoreLogger:
"""
Returns a JitCallTraceStoreLogger that logs to the configured
trace store.
"""
return JitTypeTraceStoreLogger(self.trace_store())
def trace_store(self) -> CallTraceStore:
return self.s
def code_filter(self) -> Optional[CodeFilter]:
return jit_code_filter
else:
# When MonkeyType is not installed, we provide dummy class definitions
# for the below classes.
class JitTypeTraceStoreLogger: # type: ignore[no-redef]
def __init__(self):
pass
class JitTypeTraceStore: # type: ignore[no-redef]
def __init__(self):
self.trace_records = None
class JitTypeTraceConfig: # type: ignore[no-redef]
def __init__(self):
pass
monkeytype_trace = None # noqa: F811
def jit_code_filter(code: CodeType) -> bool:
"""
    Custom CodeFilter for TorchScript to trace forward calls.
    The custom CodeFilter is required when scripting FX-traced forward calls.
    FX-traced forward calls have a `code.co_filename` starting with '<', which the
    default code filter uses to exclude tracing of stdlib and site-packages.
    Since we need all forward calls to be traced, this custom code filter
    checks whether code.co_name is 'forward' and enables tracing for all such calls.
    The code filter is otherwise similar to the default MonkeyType code filter and
    excludes tracing of stdlib and site-packages.
"""
# Filter code without a source file and exclude this check for 'forward' calls.
if code.co_name != 'forward' and (not code.co_filename or code.co_filename[0] == '<'):
return False
filename = pathlib.Path(code.co_filename).resolve()
return not any(_startswith(filename, lib_path) for lib_path in LIB_PATHS)
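# Illustrative sketch (not executed here; `fn` and `example_inputs` are placeholders,
# and the real call sites live in torch.jit's scripting path) of how the pieces in
# this file are meant to fit together:
#   s = JitTypeTraceStore()
#   config = JitTypeTraceConfig(s)
#   with monkeytype_trace(config):
#       fn(*example_inputs)  # run the function so argument types get recorded
#   arg_types = s.get_args_types(get_qualified_name(fn))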
| pytorch-master | torch/jit/_monkeytype_config.py |
import inspect
import warnings
from typing import Optional, List, Dict, Set
import torch
from torch import Tensor
from torch.fx.operator_schemas import get_signature_for_torch_op
aten = torch.ops.aten
decomposition_table: Dict[str, torch.jit.ScriptFunction] = {}
function_name_set: Set[str] = set()
def check_decomposition_has_type_annotations(f):
inspect_empty = inspect._empty # type: ignore[attr-defined]
sig = inspect.signature(f)
for param in sig.parameters.values():
assert param.annotation != inspect_empty, \
"No signature on param {name} for function {func}".format(name=param.name, func=f.name)
assert sig.return_annotation != inspect_empty, "No return annotation for function {func}".format(func=f.name)
def signatures_match(decomposition_sig, torch_op_sig):
decomp_params = decomposition_sig.parameters
op_params = torch_op_sig.parameters
if len(decomp_params) != len(op_params):
return False
for decomp_param, op_param in zip(decomp_params.values(), op_params.values()):
        # can't check full equality yet because not all fields are correctly deduced
# in the torch_op_sig - like default value
# can't check 'kind' bc
# kwarg-only values with defaults not yet supported in TS
inspect_empty = inspect._empty # type: ignore[attr-defined]
for field in ['name', 'annotation']:
if field == 'name' and decomp_param.name == "self":
warnings.warn("PyTorch uses 'input' instead of 'self' on public api")
if getattr(decomp_param, field) != getattr(op_param, field):
return False
decomp_default = decomp_param.default
op_default = op_param.default
# default value not always correctly inferred as being present on torch schema,
# but if specified on both they should be equal
if decomp_default != inspect_empty and op_default != inspect_empty:
if decomp_default != op_default:
return False
return decomposition_sig.return_annotation == torch_op_sig.return_annotation
def register_decomposition(aten_op, registry=None):
def decomposition_decorator(f):
nonlocal registry
if registry is None:
registry = decomposition_table
check_decomposition_has_type_annotations(f)
torch_op_sigs, torch_op_schemas = get_signature_for_torch_op(aten_op, return_schemas=True)
decomposition_sig = inspect.signature(f)
found_index = None
for i, torch_op_sig in enumerate(torch_op_sigs):
if signatures_match(decomposition_sig, torch_op_sig):
found_index = i
break
assert found_index is not None, "Could not find matching signature: " + str(f)
# Need unique name for jit function serialization
assert f.__name__ not in function_name_set, "Duplicated function name {}".format(f.__name__)
function_name_set.add(f.__name__)
scripted_func = torch.jit.script(f)
torch._C._jit_pass_inline(scripted_func.graph)
for _ in range(2):
torch._C._jit_pass_peephole(scripted_func.graph)
torch._C._jit_pass_constant_propagation(scripted_func.graph)
registry[str(torch_op_schemas[found_index])] = scripted_func
return f
return decomposition_decorator
# TODO: replace torch.sigmoid -> aten.sigmoid
@register_decomposition(aten.var)
def var_decomposition(input: Tensor, dim: Optional[List[int]] = None, correction: Optional[int] = None,
keepdim: bool = False) -> Tensor:
if dim is None:
dim_i: List[int] = []
dim = dim_i
if isinstance(dim, (tuple, list)) and len(dim) == 0:
n = input.numel()
else:
n = 1
for dim_i in dim: # type: ignore[assignment]
n *= input.shape[dim_i] # type: ignore[call-overload]
mean = aten.mean(input, dim, True)
sub = input - mean
sq = sub * sub
sum = aten.sum(sq, dim, keepdim)
if correction is not None:
n = n - correction
return sum / n
@register_decomposition(aten.var)
def var(input: Tensor, unbiased: bool = True) -> Tensor:
return var_decomposition(input, correction=(1 if unbiased else 0))
| pytorch-master | torch/jit/_decompositions.py |
import inspect
import torch
import types
import collections
import textwrap
import functools
import warnings
import sys
from typing import Dict, List, Set, Type
import torch._jit_internal as _jit_internal
from torch._sources import fake_range
from torch.jit.frontend import get_default_args, get_jit_class_def, get_jit_def, get_class_properties
from torch.jit._builtins import _find_builtin
from torch.jit._check import AttributeTypeIsSupportedChecker
from torch.jit._state import _python_cu, _add_script_class, _get_script_class
from torch.nn import Module
ScriptMethodStub = collections.namedtuple('ScriptMethodStub', ('resolution_callback', 'def_', 'original_method'))
PropertyStub = collections.namedtuple('PropertyStub', ('resolution_callback', 'def_'))
# TODO: there should be a more principled way of doing this.
ignored_attributes = [
"_version",
"_parameters",
"_buffers",
"_non_persistent_buffers_set",
"_backward_hooks",
"_forward_hooks",
"_forward_pre_hooks",
"_state_dict_hooks",
"_load_state_dict_pre_hooks",
"_load_state_dict_post_hooks",
"_modules",
"_initializing",
"dump_patches",
]
def _compile_and_register_class(obj, rcb, qualified_name):
script_class = _get_script_class(obj)
if not script_class:
ast = get_jit_class_def(obj, obj.__name__)
defaults = torch.jit.frontend.get_default_args_for_class(obj)
script_class = torch._C._jit_script_class_compile(qualified_name, ast, defaults, rcb)
_add_script_class(obj, script_class)
return script_class
def make_stub(func, name):
rcb = _jit_internal.createResolutionCallbackFromClosure(func)
ast = get_jit_def(func, name, self_name="RecursiveScriptModule")
return ScriptMethodStub(rcb, ast, func)
def make_stub_from_method(nn_module, method_name):
func = getattr(nn_module, method_name)
if isinstance(func, ScriptMethodStub):
return func
# Make sure the name present in the resulting AST will match the name
# requested here. The only time they don't match is if you do something
# like:
# def _forward(self):
# pass
# forward = _forward
# In this case, the actual function object will have the name `_forward`,
# even though we requested a stub for `forward`.
return make_stub(func, method_name)
def make_stubs_from_exported_methods(mod):
stubs = []
for name in dir(mod):
item = getattr(mod, name, None)
if (
_jit_internal.get_torchscript_modifier(item)
is _jit_internal.FunctionModifiers.EXPORT
):
stubs.append(make_stub_from_method(mod, name))
return stubs
def jit_ignored_properties(module):
user_annotated_ignored_attributes = getattr(module, "__jit_ignored_attributes__", list())
def get_properties_names(module):
return set(k for k, v in vars(module).items() if isinstance(v, property))
properties = get_properties_names(type(module))
    user_annotated_ignored_properties = set()
    for ignored_attr in user_annotated_ignored_attributes:
        if ignored_attr in properties:
            user_annotated_ignored_properties.add(ignored_attr)
    return user_annotated_ignored_properties
# base types that can be constants
# in addition, tuples and lists of these base types are also considered constants
# If you edit this list, then you also need to edit the handlers in
# ConstantValue in jit/script/init.cpp
_constant_types = (bool, float, int, str, type(None), torch.device, torch.layout, torch.dtype)
def _get_valid_constant(attr, v, owner_type):
if isinstance(v, _constant_types):
return v
elif isinstance(v, tuple) or isinstance(v, list):
return tuple(_get_valid_constant(attr, x, owner_type) for x in v)
constants = ", ".join(torch.typename(typ) for typ in _constant_types)
raise TypeError(textwrap.dedent("""
'{}' object in attribute '{}.{}' is not a valid constant.
Valid constants are:
1. a nn.ModuleList
2. a value of type {{{}}}
3. a list or tuple of (2)
""".format(torch.typename(type(v)), owner_type, attr, constants)))
class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
def __init__(self, source, filename, file_lineno, leading_whitespace_len):
super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)
def infer_concrete_type_builder(nn_module, share_types=True):
"""
Build a ConcreteModuleTypeBuilder from an nn.Module. This
    ConcreteModuleType doesn't have a JIT type associated with it yet; it
must be filled in by the caller.
"""
concrete_type_builder = torch._C.ConcreteModuleTypeBuilder(type(nn_module))
if isinstance(nn_module, (torch.nn.ModuleDict)):
concrete_type_builder.set_module_dict()
if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential)):
concrete_type_builder.set_module_list()
if isinstance(nn_module, (torch.nn.ParameterList)):
concrete_type_builder.set_parameter_list()
if isinstance(nn_module, (torch.nn.ParameterDict)):
concrete_type_builder.set_parameter_dict()
def get_annotations(obj):
if sys.version_info < (3, 10):
return getattr(obj, '__annotations__', {})
# In Python-3.10+ it is recommended to use inspect.get_annotations
# See https://docs.python.org/3.10/howto/annotations.html
# But also, in 3.10 annotations from base class are not inherited
# by unannotated derived one, so they must be manually extracted
annotations = inspect.get_annotations(obj)
if len(annotations) > 0:
return annotations
cls = obj if isinstance(obj, type) else type(obj)
if len(cls.__bases__) == 0:
return {}
return inspect.get_annotations(cls.__bases__[0])
class_annotations = get_annotations(nn_module)
if isinstance(nn_module, (torch.ao.quantization.QuantWrapper)):
class_annotations = {}
# Get user-annotated ignored attributes.
user_annotated_ignored_attributes = getattr(nn_module, "__jit_ignored_attributes__", list())
concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes)
ignored_properties = jit_ignored_properties(nn_module)
# try to infer the type from type annotation or from the object itself
def infer_type(name, item):
        # The forward function from Module is special; never use its annotations; we
# need to infer type directly using JIT. I originally wanted to write
# this test as isinstance(class_annotations[name], Callable) but
# isinstance on typing things doesn't seem to work: isinstance(list, Callable)
# is also true!
inferred = False
try:
if name in class_annotations and class_annotations[name] != torch.nn.Module.__annotations__["forward"]:
ann_to_type = torch.jit.annotations.ann_to_type(class_annotations[name], fake_range())
attr_type = torch._C.InferredType(ann_to_type)
elif isinstance(item, torch.jit.Attribute):
ann_to_type = torch.jit.annotations.ann_to_type(item.type, fake_range())
attr_type = torch._C.InferredType(ann_to_type)
else:
attr_type = torch._C._jit_try_infer_type(item)
inferred = True
except RuntimeError as re:
raise RuntimeError(
"Error inferring type for {name}: {item}: {re}".format(name=name, item=item, re=re)
)
return attr_type, inferred
added_names = set()
for name, item in nn_module._parameters.items():
if name in user_annotated_ignored_attributes:
continue
assert item is None or isinstance(item, torch.Tensor)
attr_type, _ = infer_type(name, item)
# We currently have the invariant in various places in our code
# that parameters must be Tensors. However, the nn.Module API also
# allows NoneType parameters. These parameters are not returned as
# part of `parameters()` and its variants, but are available
# through direct attribute access.
concrete_type_builder.add_attribute(name, attr_type.type(), True, False)
added_names.add(name)
for name, item in nn_module._buffers.items():
if name in user_annotated_ignored_attributes:
continue
assert item is None or isinstance(item, torch.Tensor)
attr_type, _ = infer_type(name, item)
concrete_type_builder.add_attribute(name, attr_type.type(), False, True)
added_names.add(name)
for name, item in nn_module._modules.items():
if name in user_annotated_ignored_attributes:
continue
attr_type, _ = infer_type(name, item)
if item is None:
# Modules can be None. We don't have direct support for optional
            # Modules, so we register it as a NoneType attribute instead.
concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
continue
if attr_type.success():
assert attr_type.type().is_interface_type()
# if the type can be inferred, it should be a module interface type
sub_concrete_type = torch._C.ConcreteModuleType.from_jit_type(attr_type.type())
else:
# otherwise we get the concrete module type for item and add it to concrete_type
sub_concrete_type = get_module_concrete_type(item, share_types)
concrete_type_builder.add_module(name, sub_concrete_type)
added_names.add(name)
# populate constants_set
constants_set = set(getattr(nn_module, "__constants__", ()))
# Constants annotated via `Final[T]` rather than being added to `__constants__`
for name, ann in class_annotations.items():
if torch._jit_internal.is_final(ann):
constants_set.add(name)
for name in constants_set:
if name in added_names:
            # TODO: We should really error in this case, but it's bc-breaking so
# we need to warn for at least one release
if name in nn_module._modules:
hint = "submodule"
elif name in nn_module._buffers:
hint = "buffer"
elif name in nn_module._parameters:
hint = "parameter"
else:
raise AssertionError("added_names must be submodule, parameter, or buffer")
warnings.warn("'{}' was found in ScriptModule constants, "
                          "but it is a non-constant {}. Consider removing it.".format(name, hint))
continue
if not hasattr(nn_module, name):
            # TODO: We should really error in this case, but it's bc-breaking so
# we need to warn for at least one release
warnings.warn("'{}' was found in ScriptModule constants, "
"but was not actually set in __init__. "
"Consider removing it.".format(name))
continue
value = getattr(nn_module, name)
concrete_type_builder.add_constant(name, _get_valid_constant(name, value, type(nn_module).__name__))
added_names.add(name)
# populate overloads
overloads = getattr(nn_module, "__overloads__", {})
# update with any annotated overloads
overloads.update(get_overload_name_mapping(get_overload_annotations(nn_module, ignored_properties)))
for name, overloaded_names in overloads.items():
concrete_type_builder.add_overload(name, overloaded_names)
for name, value in nn_module.__dict__.items():
if name in ignored_attributes or name.startswith("__"):
# Python objects have lots of random attributes attached to them;
# PyTorch adds a few more. Prevent these from getting compiled.
continue
if name in user_annotated_ignored_attributes:
continue
if name in added_names:
# Don't re-add anything we already added
continue
isoverloadpacket = isinstance(value, torch._ops.OpOverloadPacket)
if isoverloadpacket:
value = value.op
# Handle Python function attributes
if inspect.isfunction(value):
try:
scripted_fn = torch.jit.script(value)
concrete_type_builder.add_function_attribute(
name,
torch._C._jit_try_infer_type(scripted_fn).type(),
value)
except Exception as e:
# If we fail to script the function, it isn't a hard error.
# Instead, we will add it to the list of attributes we failed
# to convert, with the compilation error.
hint = ("(This function exists as an attribute on the Python module, "
"but we failed to compile it to a TorchScript function. "
"\nThe error stack is reproduced here:\n{}").format(e)
concrete_type_builder.add_failed_attribute(name, hint)
pass
continue
# Handle calls to builtin functions (either bespoke builtins from torch.jit._builtins or
# a call to an aten function like torch.add)
builtin_symbol_name = _find_builtin(value)
if builtin_symbol_name:
concrete_type_builder.add_builtin_function(name, builtin_symbol_name)
continue
# Handle Script function attributes
if isinstance(value, torch.jit.ScriptFunction):
concrete_type_builder.add_function_attribute(
name,
torch._C._jit_try_infer_type(value).type(),
value)
continue
# If we got here, this is a regular "data" attribute, add it to the concrete type
attr_type, inferred = infer_type(name, value)
if attr_type.success():
concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
else:
# TODO: could add more detail here. For example, what the user should do
# when the pytype is `list` or `NoneType`
inferred_msg = "Its type was inferred; try adding a type annotation for the attribute." if inferred else ""
additional_info = f"{attr_type.reason()}. {inferred_msg}"
hint = "(This attribute exists on the Python module, " \
f"but we failed to convert Python type: '{torch.typename(type(value))}' " \
f"to a TorchScript type. {additional_info})"
concrete_type_builder.add_failed_attribute(name, hint)
# add hooks to concrete type
for hook in nn_module._forward_hooks.values():
concrete_type_builder.add_forward_hook(hook)
for pre_hook in nn_module._forward_pre_hooks.values():
concrete_type_builder.add_forward_pre_hook(pre_hook)
return concrete_type_builder
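# Illustrative sketch (not part of the original file): attributes listed in
# __constants__ or annotated with typing.Final are folded into the concrete type
# as constants by the builder above; everything else becomes a typed attribute,
# parameter, buffer, or submodule.
def _example_constant_vs_attribute():
    from typing import Final
    class _WithConstant(torch.nn.Module):
        scale: Final[int]
        def __init__(self):
            super().__init__()
            self.scale = 3   # picked up as a constant via the Final annotation
            self.offset = 1  # regular attribute, type inferred as int
        def forward(self, x):
            return x * self.scale + self.offset
    return torch.jit.script(_WithConstant())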
class ConcreteTypeStore(object):
type_store: Dict[Type[Module], List[torch._C.ConcreteModuleType]]
methods_compiled: Set[torch._C.ConcreteModuleType]
def __init__(self):
# Python module type => List[ConcreteModuleType)]
self.type_store = {}
# ConcreteTypes that have had their methods already compiled
self.methods_compiled = set()
def get_or_create_concrete_type(self, nn_module):
"""
Infer a ConcreteType from this `nn.Module` instance. Underlying JIT
types are re-used if possible.
"""
concrete_type_builder = infer_concrete_type_builder(nn_module)
nn_module_type = type(nn_module)
if nn_module_type not in self.type_store:
self.type_store[nn_module_type] = []
# Search the type store for an already-available JIT type
known_types = self.type_store[nn_module_type]
for known_type in known_types:
if known_type.equals(concrete_type_builder):
return known_type
# We didn't find anything; generate a new JIT type from this concrete type
concrete_type = concrete_type_builder.build()
self.type_store[nn_module_type].append(concrete_type)
return concrete_type
concrete_type_store = ConcreteTypeStore()
def create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs):
method_defs = [m.def_ for m in method_stubs]
method_rcbs = [m.resolution_callback for m in method_stubs]
method_defaults = [get_default_args(m.original_method) for m in method_stubs]
property_defs = [p.def_ for p in property_stubs]
property_rcbs = [p.resolution_callback for p in property_stubs]
concrete_type._create_methods_and_properties(property_defs, property_rcbs, method_defs, method_rcbs, method_defaults)
def create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs):
hook_defs = [h.def_ for h in hook_stubs]
hook_rcbs = [h.resolution_callback for h in hook_stubs]
pre_hook_defs = [h.def_ for h in pre_hook_stubs]
pre_hook_rcbs = [h.resolution_callback for h in pre_hook_stubs]
concrete_type._create_hooks(hook_defs, hook_rcbs, pre_hook_defs, pre_hook_rcbs)
def get_module_concrete_type(nn_module, share_types=True):
"""
Gets a concrete type for nn_modules. If share_types is True, the concrete
type is fetched from concrete_type_store. If it is False, a new concrete type
is created without first searching concrete_type_store.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
        share_types: Whether to share underlying JIT types between modules (if possible).
Returns:
A concrete type for nn_module.
"""
assert isinstance(nn_module, Module)
if isinstance(nn_module, torch.jit.ScriptModule) and \
hasattr(nn_module, "_concrete_type"):
return nn_module._concrete_type
if share_types:
# Look into the store of cached JIT types
concrete_type = concrete_type_store.get_or_create_concrete_type(nn_module)
else:
# Get a concrete type directly, without trying to re-use an existing JIT
# type from the type store.
concrete_type_builder = infer_concrete_type_builder(nn_module, share_types)
concrete_type_builder.set_poisoned()
concrete_type = concrete_type_builder.build()
return concrete_type
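# Illustrative sketch (not part of the original file): under the default
# share_types=True path, two instances of the same Module class that produce
# equal builders are expected to reuse one cached concrete type.
def _example_type_sharing():
    class _Shared(torch.nn.Module):
        def forward(self, x):
            return x + 1
    first = torch.jit.script(_Shared())
    second = torch.jit.script(_Shared())
    # Both lookups go through concrete_type_store, so the cached object is reused.
    return first._concrete_type is second._concrete_type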
def create_script_class(obj):
"""
Create and return a RecursiveScriptClass instance from a Python object.
Arguments:
obj: A Python object.
"""
qualified_class_name = _jit_internal._qualified_name(type(obj))
rcb = _jit_internal.createResolutionCallbackForClassMethods(type(obj))
# Script the type of obj if it hasn't already been scripted.
_compile_and_register_class(type(obj), rcb, qualified_class_name)
class_ty = _python_cu.get_class(qualified_class_name)
# Create an empty torch._C.ScriptObject with the scripted type.
cpp_object = torch._C._create_object_with_type(class_ty)
# Copy all of the attributes over to the torch._C.ScriptObject.
for name, value in obj.__dict__.items():
cpp_object.setattr(name, value)
# Wrap the torch._C.ScriptObject in a RecursiveScriptClass instance.
return wrap_cpp_class(cpp_object)
def create_script_module(nn_module, stubs_fn, share_types=True, is_tracing=False):
"""
Creates a new ScriptModule from an nn.Module
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
share_types: Whether to share underlying JIT types between modules (if possible).
            NOTE: Only set this to False when we cannot guarantee type sharing will work
correctly. This only happens today for traced modules, where the same
module can produce different traced methods depending on the inputs.
is_tracing: Whether this function is called during tracing or scripting. If tracing,
we don't need to do AttributeTypeIsSupportedChecker because all the unsupported
attributes will be baked as constant in the tracing graph. In addition,
this check significantly slows down the traced modules when the module size is big.
"""
assert not isinstance(nn_module, torch.jit.RecursiveScriptModule)
check_module_initialized(nn_module)
concrete_type = get_module_concrete_type(nn_module, share_types)
if not is_tracing:
AttributeTypeIsSupportedChecker().check(nn_module)
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
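# Illustrative sketch (not part of the original file): create_script_module is
# normally reached indirectly, by calling torch.jit.script on an eager nn.Module.
def _example_script_entry_point():
    class _Tiny(torch.nn.Module):
        def forward(self, x):
            return x + 1
    # torch.jit.script ends up invoking create_script_module with
    # infer_methods_to_compile as the stubs_fn.
    return torch.jit.script(_Tiny())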
def create_script_module_impl(nn_module, concrete_type, stubs_fn):
"""
Convert an nn.Module to a RecursiveScriptModule.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
concrete_type: The fully initialized ConcreteType of the module.
stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
"""
cpp_module = torch._C._create_module_with_type(concrete_type.jit_type)
method_stubs = stubs_fn(nn_module)
property_stubs = get_property_stubs(nn_module)
hook_stubs, pre_hook_stubs = get_hook_stubs(nn_module)
user_annotated_ignored_attributes = getattr(nn_module, "__jit_ignored_attributes__", list())
ignored_properties = jit_ignored_properties(nn_module)
def init_fn(script_module):
# Initialize the ScriptModule:
# 1. Copy the attributes/parameters/buffers from the original `nn_module` to the new ScriptModule.
for name, (attr_type, is_param) in concrete_type.get_attributes().items():
orig_value = getattr(nn_module, name)
orig_value = orig_value.value if isinstance(orig_value, torch.jit.Attribute) else orig_value
cpp_module.setattr(name, orig_value)
# 2. Copy the submodules from the original `nn_module` to the new ScriptModule,
# recursively scripting them.
for name, sub_concrete_type in concrete_type.get_modules():
orig_value = getattr(nn_module, name)
assert isinstance(orig_value, Module), "Expected Module but got {}".format(type(orig_value))
module_type = sub_concrete_type.jit_type
if isinstance(module_type, torch._C.InterfaceType):
# use the interface inference rule to compile the module
scripted = interface_script(module_type, orig_value)
elif isinstance(orig_value, torch.jit.ScriptModule):
scripted = orig_value
else:
# always reuse the provided stubs_fn to infer the methods to compile
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
cpp_module.setattr(name, scripted)
script_module._modules[name] = scripted
# 3. Copy @ignored/@unused methods and attrs from the original `nn_module` to the new ScriptModule.
# This ensures we can access these Python methods on the ScriptModule.
for name in dir(nn_module):
if name in ignored_properties:
continue
item = getattr(nn_module, name, None)
if inspect.ismethod(item) and _jit_internal.is_ignored_fn(item):
unbound_function = getattr(nn_module, name).__func__
bound_method = unbound_function.__get__(script_module)
setattr(script_module, name, bound_method)
elif concrete_type.is_ignored_attribute(name):
setattr(script_module, name, item)
# For convenience, attach the concrete type to the new ScriptModule
script_module._concrete_type = concrete_type
# Actually create the ScriptModule, initializing it with the function we just defined
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
# Compile methods if necessary
if concrete_type not in concrete_type_store.methods_compiled:
create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs)
# Create hooks after methods to ensure no name collisions between hooks and methods.
# If done before, hooks can overshadow methods that aren't exported.
create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs)
torch._C._run_emit_module_hook(cpp_module)
concrete_type_store.methods_compiled.add(concrete_type)
# Copy the forward hooks and pre-hooks to the new ScriptModule
# to allow the hooks to be run from eager as ScriptFunctions
for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
script_module._forward_pre_hooks[idx] = fn
for idx, fn in enumerate(script_module._c._get_forward_hooks()):
script_module._forward_hooks[idx] = fn
# Special handling so methods like __len__ work in script methods on classes derived from containers
if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)) and \
'__len__' not in cpp_module._method_names():
script_module.define("def __len__(self):\n return {}\n".format(len(nn_module)))
if isinstance(nn_module, torch.nn.ModuleDict) and \
'__contains__' not in cpp_module._method_names():
if len(nn_module.keys()):
keys = repr(list(nn_module.keys()))
script_module.define("def __contains__(self, key: str):\n return key in {}\n".format(keys))
else:
script_module.define("def __contains__(self, key: str):\n return False\n")
# Make the compiled methods available to the Python ScriptModule class.
for method_stub in method_stubs:
if method_stub.original_method is None:
            # define()'d methods don't have a Python original_method, so we
# don't need to do any Python re-wrapping stuff
continue
name = method_stub.original_method.__name__
if name != method_stub.def_.name().name:
# TODO: Why skip this? Because @torch.jit._overload_method will
# mangle the name of the function.
continue
script_method = cpp_module._get_method(name)
# Wrap the original to propagate docstrings and such.
        # TODO: we don't currently do this for functions that are recursively
        # compiled; we should.
wrapped_script_method = functools.wraps(method_stub.original_method)(script_method)
# Add the methods to the script_module directly. This ensures they will
# be found first when `name` is looked up (as opposed to the stubs or
# nn.Module.forward)
script_module.__dict__[name] = wrapped_script_method
# Make module properties available on the Python ScriptModule class.
for property_stub in property_stubs:
property_name = property_stub.def_.name().name
fget = cpp_module._get_method(property_stub.def_.getter_name().name)
# Setter is optional, so it may not exist.
setter_name = property_stub.def_.setter_name()
fset = cpp_module._get_method(setter_name.name) if setter_name else None
script_module.__dict__[property_name] = property(property_name, fget, fset) # type: ignore[arg-type]
# copy over python methods to script module if they aren't defined on the script module
# this is currently an internal api used only on module containers
for name in dir(nn_module):
if name in ignored_properties:
continue
item = getattr(nn_module, name, None)
if _jit_internal.get_torchscript_modifier(item) is _jit_internal.FunctionModifiers.COPY_TO_SCRIPT_WRAPPER:
add_python_attr_to_scripted_model(script_module, nn_module, name)
return script_module
# We define shims of certain attributes on the RecursiveScriptModule to support
# magic methods. To check if a script model defines an attribute we need
# to also check that the attribute is not the shim
def script_model_defines_attr(script_model, attr):
script_attr = getattr(script_model, attr, None)
if script_attr is None:
return False
default_attr = getattr(torch.jit.RecursiveScriptModule, attr, None)
if default_attr is None:
return False
return script_attr != default_attr
def add_python_attr_to_scripted_model(script_model, orig, attr):
if hasattr(orig, attr) and script_model_defines_attr(script_model, attr):
setattr(script_model, attr, getattr(orig, attr))
def get_overload_annotations(mod, jit_ignored_properties):
# original function => [(mangled overload name, overload function)]
overloads = {}
for name in dir(type(mod)):
if name in jit_ignored_properties:
continue
item = getattr(mod, name, None)
if not callable(item):
continue
# builtin functions like repr() in python 2 do not have __module__ defined
if hasattr(item, "__module__") and item.__module__ is not None:
method_overloads = _jit_internal._get_overloaded_methods(item, mod.__class__)
if method_overloads is None:
continue
if item.__func__ in method_overloads:
raise RuntimeError(_jit_internal.get_overload_no_implementation_error_message(
'method', item.__func__))
names = [name + "__" + str(i) for i in range(len(method_overloads))]
overloads[item] = list(zip(names, method_overloads))
return overloads
def get_overload_name_mapping(overload_info):
# Same format as __overloads__
# original function => [overload names]
overload_name_mappings: Dict[str, List[str]] = {}
for orig_fn, overloads in overload_info.items():
original_name = orig_fn.__name__
if original_name not in overload_name_mappings:
overload_name_mappings[original_name] = []
for overload_name, _ in overloads:
overload_name_mappings[original_name].append(overload_name)
return overload_name_mappings
def _check_no_signature(func):
signature = torch.jit.annotations.get_signature(func, None, fake_range(), inspect.ismethod(func))
if signature is None:
qual_name = _jit_internal._qualified_name(func)
raise RuntimeError("Must explicitly add type annotations to overloaded functions: {}".format(qual_name))
def make_stubs_for_overloads(overload_info):
overload_stubs = []
for orig_fn, overloads in overload_info.items():
orig_ast = get_jit_def(orig_fn, orig_fn.__name__, self_name="RecursiveScriptModule")
for overload_name, overload_fn in overloads:
_check_no_signature(overload_fn)
over_ast = get_jit_def(overload_fn, overload_fn.__name__, self_name="RecursiveScriptModule")
new_ast = torch._C._replace_overloaded_method_decl(over_ast.decl(), orig_ast, overload_name)
_rcb = _jit_internal.createResolutionCallbackFromClosure(orig_fn)
overload_stubs.append(ScriptMethodStub(_rcb, new_ast, overload_fn))
return overload_stubs
def check_module_initialized(mod):
assert isinstance(mod, torch.nn.Module)
if not hasattr(mod, '_parameters'):
raise RuntimeError("'{}' has not been initialized, did you forget to call 'super()'?"
.format(torch.typename(type(mod))))
# This is to avoid importing torch.distributed.nn
if not hasattr(mod, 'remote_parameters'):
for name, param in mod._parameters.items():
if param is not None and torch.nn.parameter.is_lazy(param):
raise RuntimeError("'{}' has uninitialized parameters {}. Did you forget to run a forward pass?"
.format(torch.typename(type(mod)), name))
for name, buf in mod._buffers.items():
if buf is not None and torch.nn.parameter.is_lazy(buf):
raise RuntimeError("'{}' has uninitialized buffers {}. Did you forget to run a forward pass?"
.format(torch.typename(type(mod)), name))
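# Illustrative sketch (not part of the original file): lazy modules must see a
# forward pass before scripting, otherwise check_module_initialized raises.
def _example_initialize_lazy_module():
    lazy = torch.nn.LazyLinear(4)
    lazy(torch.randn(2, 8))  # materializes the uninitialized parameters
    return lazy              # would now pass check_module_initialized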
def infer_methods_to_compile(nn_module):
"""
Implements the default rules for which methods should act as starting
points for compilation (TODO add a link when the rules are published).
"""
check_module_initialized(nn_module)
user_annotated_ignored_attributes = getattr(nn_module, "__jit_ignored_attributes__", list())
ignored_properties = jit_ignored_properties(nn_module)
methods: List[str] = []
if hasattr(nn_module, 'forward') and not _jit_internal.is_ignored_fn(nn_module.forward):
forward_func = getattr(nn_module.forward, "__func__", None)
module_forward = getattr(torch.nn.Module, "forward", None)
if forward_func != module_forward:
methods = ['forward']
exported = []
for name in dir(nn_module):
if name in ignored_properties:
continue
item = getattr(nn_module, name, None)
if _jit_internal.get_torchscript_modifier(item) is _jit_internal.FunctionModifiers.EXPORT:
exported.append(name)
methods = methods + exported
overload_name_mappings = dict(getattr(nn_module, "__overloads__", {}))
overload_info = get_overload_annotations(nn_module, ignored_properties)
overload_name_mappings.update(get_overload_name_mapping(overload_info))
overload_stubs = make_stubs_for_overloads(overload_info)
nn_module.__overloads__ = overload_name_mappings
    # we shouldn't directly compile overloaded methods, just their overloads
def ignore_overloaded(method_name):
return method_name not in overload_name_mappings
filtered_methods = filter(ignore_overloaded, methods)
    # De-duplicate the methods. We don't want to use a set to store the methods because it
    # introduces non-determinism to the compile order.
uniquer: Set[str] = set()
uniqued_methods = []
for name in filtered_methods:
if name in uniquer:
continue
uniqued_methods.append(name)
uniquer.add(name)
stubs = []
for method in uniqued_methods:
stubs.append(make_stub_from_method(nn_module, method))
return overload_stubs + stubs
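# Illustrative sketch (not part of the original file): under the rules above,
# `forward` and any @torch.jit.export-annotated method are compilation starting
# points, while other methods are compiled only when reached from them.
def _example_exported_method():
    class _WithExport(torch.nn.Module):
        def forward(self, x):
            return self._helper(x)
        def _helper(self, x):
            return x * 2
        @torch.jit.export
        def standalone(self, x):
            return x - 1
    return torch.jit.script(_WithExport())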
def get_hook_stubs(nn_module):
"""
    Returns forward hook and pre_hook ScriptMethodStubs
"""
check_module_initialized(nn_module)
hook_map: Dict = {}
hook_stubs = []
for hook in nn_module._forward_hooks.values():
if hook.__name__ in hook_map:
if id(hook) != id(hook_map[hook.__name__]):
raise RuntimeError(
f"Hook '{hook.__name__}' on {type(nn_module).__name__} "
"has at least two different python definitions."
" Please use unique names for all hooks."
)
else:
hook_map[hook.__name__] = hook
hook_stubs.append(make_stub(hook, hook.__name__))
pre_hook_stubs = []
for pre_hook in nn_module._forward_pre_hooks.values():
if pre_hook.__name__ in hook_map:
if id(pre_hook) != id(hook_map[pre_hook.__name__]):
raise RuntimeError(
f"Pre-hook '{pre_hook.__name__}' on {type(nn_module).__name__} "
"has at least two different python definitions."
" Please use unique names for all hooks."
)
else:
hook_map[pre_hook.__name__] = pre_hook
pre_hook_stubs.append(make_stub(pre_hook, pre_hook.__name__))
return hook_stubs, pre_hook_stubs
def get_property_stubs(nn_module):
"""
Create property stubs for the properties of the module by creating method
stubs for the getter and setter.
"""
module_ty = type(nn_module)
properties_asts = get_class_properties(module_ty, self_name="RecursiveScriptModule")
rcbs = {}
for name in dir(module_ty):
item = getattr(module_ty, name, None)
if isinstance(item, property):
if not item.fget:
                raise RuntimeError(f'Property {name} of {type(nn_module).__name__} must have a getter')
rcbs[name] = _jit_internal.createResolutionCallbackFromClosure(item.fget)
stubs = [PropertyStub(rcbs[ast.name().name], ast) for ast in properties_asts]
return stubs
def interface_script(mod_interface, nn_module):
"""
Makes a ScriptModule from an nn.Module, using the interface methods rule for
determining which methods to compile.
Args:
        mod_interface: the interface type that the module has
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
"""
if isinstance(nn_module, torch.jit.ScriptModule):
return nn_module
check_module_initialized(nn_module)
def infer_interface_methods_to_compile(nn_module):
"""
Rule to infer the methods from the interface type to know which
methods need to act as starting points for compilation.
"""
stubs = []
for method in mod_interface.getMethodNames():
stubs.append(make_stub_from_method(nn_module, method))
return stubs
return create_script_module(nn_module, infer_interface_methods_to_compile)
def try_compile_fn(fn, loc):
if _jit_internal.is_ignored_fn(fn):
# Don't do anything for @ignore'd functions
return None
if isinstance(fn, torch.nn.Module):
        # Since modules are callable, pybind recognizes them as functions, but
        # we don't do anything for them
return None
if not inspect.isfunction(fn) and not inspect.ismethod(fn):
raise RuntimeError("`{}` is not a function. Recursive scripting only supports "
"Python functions or methods currently.\n"
"Consider manually annotating `{}` with @torch.jit.script.".format(fn, fn))
# We don't have the actual scope where the function was defined, but we can
# extract the necessary info from the closed over variables on the function
# object
rcb = _jit_internal.createResolutionCallbackFromClosure(fn)
return torch.jit.script(fn, _rcb=rcb)
def wrap_cpp_class(cpp_class):
"""
Wrap this torch._C.Object in a Python RecursiveScriptClass.
"""
return torch.jit.RecursiveScriptClass(cpp_class)
def wrap_cpp_module(cpp_module):
"""
Wrap this torch._C.ScriptModule in a Python ScriptModule, recursively for all submodules
"""
def init_fn(script_module):
for name, cpp_module in torch._C.ModuleDict(script_module._c).items():
setattr(script_module, name, wrap_cpp_module(cpp_module))
script_module._concrete_type = torch._C.ConcreteModuleType.from_jit_type(script_module._c._type())
for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
script_module._forward_pre_hooks[idx] = fn
for idx, fn in enumerate(script_module._c._get_forward_hooks()):
script_module._forward_hooks[idx] = fn
return torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
def compile_unbound_method(concrete_type, fn):
if _jit_internal.is_ignored_fn(fn):
return None
stub = make_stub(fn, fn.__name__)
with torch._jit_internal._disable_emit_hooks():
# We don't want to call the hooks here since the graph that is calling
# this function is not yet complete
create_methods_and_properties_from_stubs(concrete_type, (stub,), ())
return stub
def lazy_bind(concrete_type, unbound_method):
"""
Returns a function that lazily binds `unbound_method` to a provided
Module IValue, then invokes the method. We do this so that any Python
shenanigans that will poison type sharing are impossible at compile
time.
"""
def lazy_binding_method(cpp_module, *args):
def init_fn(script_module):
orig_class = concrete_type.py_class
# Copy @ignored/@unused methods from the original module to the new one.
# This ensures they are available during execution.
for name in dir(orig_class):
item = getattr(orig_class, name, None)
if _jit_internal.is_ignored_fn(item):
setattr(script_module, name, item)
# Copy constants over so they are available during execution.
for name, value in concrete_type.get_constants().items():
setattr(script_module, name, value)
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
method = types.MethodType(unbound_method, script_module)
return method(*args)
# make the lazy binding method "look like" the original method
lazy_binding_method.original_fn = unbound_method # type: ignore[attr-defined]
lazy_binding_method.__name__ = unbound_method.__name__
torch._jit_internal.copy_torchscript_modifier(unbound_method, lazy_binding_method)
return lazy_binding_method
| pytorch-master | torch/jit/_recursive.py |
import torch
add_stat_value = torch.ops.prim.AddStatValue
set_logger = torch._C._logging_set_logger
LockingLogger = torch._C.LockingLogger
AggregationType = torch._C.AggregationType
NoopLogger = torch._C.NoopLogger
time_point = torch.ops.prim.TimePoint
| pytorch-master | torch/jit/_logging.py |
"""Serialization
This module contains functionality for serializing TorchScript modules, notably:
* torch.jit.save
* torch.jit.load
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import os
import pathlib
import torch
from torch._six import string_classes
from torch.jit._recursive import wrap_cpp_module
from torch.serialization import validate_cuda_device
def save(m, f, _extra_files=None):
r"""
Save an offline version of this module for use in a separate process. The
saved module serializes all of the methods, submodules, parameters, and
attributes of this module. It can be loaded into the C++ API using
``torch::jit::load(filename)`` or into the Python API with
:func:`torch.jit.load <torch.jit.load>`.
To be able to save a module, it must not make any calls to native Python
functions. This means that all submodules must be subclasses of
:class:`ScriptModule` as well.
.. DANGER::
All modules, no matter their device, are always loaded onto the CPU
during loading. This is different from :func:`torch.load`'s semantics
and may change in the future.
Args:
m: A :class:`ScriptModule` to save.
f: A file-like object (has to implement write and flush) or a string
containing a file name.
_extra_files: Map from filename to contents which will be stored as part of `f`.
.. note::
torch.jit.save attempts to preserve the behavior of some operators
across versions. For example, dividing two integer tensors in
PyTorch 1.5 performed floor division, and if the module
containing that code is saved in PyTorch 1.5 and loaded in PyTorch 1.6
its division behavior will be preserved. The same module saved in
PyTorch 1.6 will fail to load in PyTorch 1.5, however, since the
behavior of division changed in 1.6, and 1.5 does not know how to
replicate the 1.6 behavior.
Example:
.. testcode::
import torch
import io
class MyModule(torch.nn.Module):
def forward(self, x):
return x + 10
m = torch.jit.script(MyModule())
# Save to file
torch.jit.save(m, 'scriptmodule.pt')
# This line is equivalent to the previous
m.save("scriptmodule.pt")
# Save to io.BytesIO buffer
buffer = io.BytesIO()
torch.jit.save(m, buffer)
# Save with extra files
extra_files = {'foo.txt': b'bar'}
torch.jit.save(m, 'scriptmodule.pt', _extra_files=extra_files)
"""
if _extra_files is None:
_extra_files = {}
if isinstance(f, str) or isinstance(f, pathlib.Path):
m.save(f, _extra_files=_extra_files)
else:
ret = m.save_to_buffer(_extra_files=_extra_files)
f.write(ret)
def load(f, map_location=None, _extra_files=None):
r"""
Load a :class:`ScriptModule` or :class:`ScriptFunction` previously
saved with :func:`torch.jit.save <torch.jit.save>`
All previously saved modules, no matter their device, are first loaded onto CPU,
and then are moved to the devices they were saved from. If this fails (e.g.
because the run time system doesn't have certain devices), an exception is
raised.
Args:
f: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
map_location (string or torch.device): A simplified version of
            ``map_location`` in `torch.load` used to dynamically remap
storages to an alternative set of devices.
_extra_files (dictionary of filename to content): The extra
filenames given in the map would be loaded and their content
would be stored in the provided map.
Returns:
A :class:`ScriptModule` object.
Example:
.. testcode::
import torch
import io
torch.jit.load('scriptmodule.pt')
# Load ScriptModule from io.BytesIO object
with open('scriptmodule.pt', 'rb') as f:
buffer = io.BytesIO(f.read())
# Load all tensors to the original device
torch.jit.load(buffer)
# Load all tensors onto CPU, using a device
buffer.seek(0)
torch.jit.load(buffer, map_location=torch.device('cpu'))
# Load all tensors onto CPU, using a string
buffer.seek(0)
torch.jit.load(buffer, map_location='cpu')
# Load with extra files.
extra_files = {'foo.txt': ''} # values will be replaced with data
torch.jit.load('scriptmodule.pt', _extra_files=extra_files)
print(extra_files['foo.txt'])
.. testoutput::
:hide:
...
.. testcleanup::
import os
os.remove("scriptmodule.pt")
"""
if isinstance(f, string_classes):
if not os.path.exists(f): # type: ignore[type-var]
raise ValueError("The provided filename {} does not exist".format(f)) # type: ignore[str-bytes-safe]
if os.path.isdir(f):
raise ValueError("The provided filename {} is a directory".format(f)) # type: ignore[str-bytes-safe]
map_location = validate_map_location(map_location)
if _extra_files is None:
_extra_files = {}
cu = torch._C.CompilationUnit()
if isinstance(f, str) or isinstance(f, pathlib.Path):
cpp_module = torch._C.import_ir_module(cu, str(f), map_location, _extra_files)
else:
cpp_module = torch._C.import_ir_module_from_buffer(
cu, f.read(), map_location, _extra_files
)
# TODO: Pretty sure this approach loses ConstSequential status and such
return wrap_cpp_module(cpp_module)
def validate_map_location(map_location=None):
if isinstance(map_location, str):
map_location = torch.device(map_location)
elif not (map_location is None or isinstance(map_location, torch.device)):
raise ValueError(
"map_location should be either None, string or torch.device, "
"but got type: " + str(type(map_location))
)
if str(map_location).startswith("cuda"):
validate_cuda_device(map_location)
return map_location
def get_ff_module():
try:
import torch._C_flatbuffer as ff
return ff
except ImportError:
print("Please include //caffe2:_C_flatbuffer as dependency.")
raise
def jit_module_from_flatbuffer(f):
ff = get_ff_module()
if isinstance(f, string_classes):
if not os.path.exists(f): # type: ignore[type-var]
raise ValueError("The provided filename {} does not exist".format(f)) # type: ignore[str-bytes-safe]
if os.path.isdir(f):
raise ValueError("The provided filename {} is a directory".format(f)) # type: ignore[str-bytes-safe]
if isinstance(f, str) or isinstance(f, pathlib.Path):
f = str(f)
return wrap_cpp_module(ff._load_jit_module_from_file(f))
else:
return wrap_cpp_module(ff._load_jit_module_from_bytes(f.read()))
def save_jit_module_to_flatbuffer(m, f, _extra_files=None):
r"""
Save an offline version of this module for use in a separate process. The
saved module serializes all of the methods, submodules, parameters, and
attributes of this module. It can be loaded into the C++ API using
``torch::jit::load_jit_module_from_file(filename)`` or into the Python API with
:func:`torch.jit.jit_module_from_flatbuffer<torch.jit.jit_module_from_flatbuffer>`.
To be able to save a module, it must not make any calls to native Python
functions. This means that all submodules must be subclasses of
:class:`ScriptModule` as well.
.. DANGER::
All modules, no matter their device, are always loaded onto the CPU
during loading. This is different from :func:`torch.load`'s semantics
and may change in the future.
Args:
m: A :class:`ScriptModule` to save.
        f: A file-like object (has to implement write) or a string containing a file name
Example:
.. testcode::
import torch
import io
class MyModule(torch.nn.Module):
def forward(self, x):
return x + 10
m = torch.jit.script(MyModule())
# Save to file
torch.jit.save_jit_module_to_flatbuffer(m, 'scriptmodule.ff')
"""
extra_files = _extra_files
if extra_files is None:
extra_files = {}
ff = get_ff_module()
if isinstance(f, str) or isinstance(f, pathlib.Path):
f = str(f)
ff._save_jit_module(m._c, f, extra_files)
else:
s = ff._save_jit_module_to_bytes(m._c, extra_files)
f.write(s)
def get_flatbuffer_module_info(path_or_file):
r"""Get some information regarding a model file in flatbuffer format.
Args:
path_or_file: Either str, Path or file like object (BytesIO OK).
If it's str or Path, we will read the file referenced by that
path as Bytes.
Returns:
A dict with metadata on what that file contains, currently looks like
this:
{
'bytecode_version': 4, # int
'operator_version': 4, # int
'function_names': {
'__torch__.___torch_mangle_0.Foo.forward'}, # set
'type_names': set(), # set
'opname_to_num_args': {'aten::linear': 3} # Dict[str, int]
}
"""
ff = get_ff_module()
if isinstance(path_or_file, str) or isinstance(path_or_file, pathlib.Path):
with open(path_or_file, "rb") as f:
all_bytes = f.read()
else:
all_bytes = path_or_file.read()
return ff._get_module_info_from_flatbuffer(all_bytes)
| pytorch-master | torch/jit/_serialization.py |
from torch import Tensor, _VF # noqa: F401
from torch.nn.utils.rnn import PackedSequence
import torch
import warnings
from typing import List, Optional, Tuple
class QuantizedLinear(torch.jit.ScriptModule):
__constants__ = ['scale', 'zero_point']
def __init__(self, other):
super(QuantizedLinear, self).__init__()
warnings.warn(
"torch.jit.QuantizedLinear is deprecated and will be removed in an upcoming "
"PyTorch release. Please use the torch.nn.quantized.dynamic.Linear instead.")
self.in_features = other.in_features
self.out_features = other.out_features
# Quantize weight and discard the original
self.weight, self.col_offsets, self.scale, self.zero_point = torch.fbgemm_linear_quantize_weight(
other.weight.clone(memory_format=torch.contiguous_format).float())
self.weight = torch.nn.Parameter(self.weight, requires_grad=False)
self.col_offsets = torch.nn.Parameter(self.col_offsets, requires_grad=False)
assert other.bias is not None, 'QuantizedLinear requires a bias'
self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
self.register_buffer(
'packed_tensor_ptr',
torch.fbgemm_pack_quantized_matrix(self.weight.clone(memory_format=torch.contiguous_format)))
@torch.jit.script_method
def _unpack(self):
self.packed_tensor_ptr.set_(
torch.fbgemm_pack_quantized_matrix(self.weight))
@torch.jit.script_method
def _pack(self):
self.packed_tensor_ptr.set_(
torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
@torch.jit.script_method
def forward(self, input):
out = torch.fbgemm_linear_int8_weight_fp32_activation(
input.float(), self.weight, self.packed_tensor_ptr, self.col_offsets,
self.scale, self.zero_point, self.bias)
return out.to(input.dtype)
def extra_repr(self):
repr = 'in_features={in_features}, out_features={out_features}, ' \
'scale={scale}, zero_point={zero_point}'.format(**self.__dict__)
return repr
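# Illustrative sketch (not part of the original file): the migration path named
# in the deprecation warning above. torch.ao.quantization.quantize_dynamic swaps
# eligible nn.Linear layers for their torch.nn.quantized.dynamic counterparts.
def _example_dynamic_linear_migration():
    float_model = torch.nn.Sequential(torch.nn.Linear(8, 4))
    return torch.ao.quantization.quantize_dynamic(
        float_model, {torch.nn.Linear}, dtype=torch.qint8)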
# FP16 weights
class QuantizedLinearFP16(torch.jit.ScriptModule):
def __init__(self, other):
super(QuantizedLinearFP16, self).__init__()
warnings.warn(
"torch.jit.QuantizedLinearFP16 is deprecated and will be removed in an upcoming "
"PyTorch release. Please use the torch.nn.quantized.dynamic.Linear instead.")
self.in_features = other.in_features
self.out_features = other.out_features
self.original_weight = other.weight
self.weight = torch.fbgemm_pack_gemm_matrix_fp16(
other.weight.clone(memory_format=torch.contiguous_format).float())
assert other.bias is not None, 'QuantizedLinearFP16 requires a bias'
self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
self.register_buffer('packed_weight', self.weight)
@torch.jit.script_method
def _unpack(self):
self.packed_weight.set_(
torch.fbgemm_pack_gemm_matrix_fp16(
self.original_weight))
@torch.jit.script_method
def _pack(self):
self.packed_weight.set_(
torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
@torch.jit.script_method
def forward(self, input):
out = torch.fbgemm_linear_fp16_weight_fp32_activation(
input.float(), self.packed_weight, self.bias)
return out
def extra_repr(self):
repr = 'in_features={in_features}, out_features={out_features}, '.format(**self.__dict__)
return repr
# Quantized RNN cell implementations
class QuantizedRNNCellBase(torch.jit.ScriptModule):
__constants__ = ['input_size', 'hidden_size', 'bias', 'scale_hh', 'scale_ih',
'zero_point_ih', 'zero_point_hh']
def __init__(self, other):
super(QuantizedRNNCellBase, self).__init__()
warnings.warn(
"torch.jit.QuantizedRNNCellBase is deprecated and will be removed in an upcoming "
"PyTorch release. Please use the torch.nn.quantized.dynamic.RNNCell instead.")
self.input_size = other.input_size
self.hidden_size = other.hidden_size
self.bias = other.bias
if not self.bias:
raise ValueError("Quantized RNN cells require bias terms")
weight_ih, col_offsets_ih, self.scale_ih, self.zero_point_ih = \
torch.fbgemm_linear_quantize_weight(other.weight_ih.clone(memory_format=torch.contiguous_format).float())
self.register_buffer('weight_ih', weight_ih)
self.register_buffer('col_offsets_ih', col_offsets_ih)
weight_hh, col_offsets_hh, self.scale_hh, self.zero_point_hh = \
torch.fbgemm_linear_quantize_weight(other.weight_hh.clone(memory_format=torch.contiguous_format).float())
self.register_buffer('weight_hh', weight_hh)
self.register_buffer('col_offsets_hh', col_offsets_hh)
packed_ih = torch.fbgemm_pack_quantized_matrix(self.weight_ih)
self.register_buffer('packed_ih', packed_ih)
packed_hh = torch.fbgemm_pack_quantized_matrix(self.weight_hh)
self.register_buffer('packed_hh', packed_hh)
self.bias_ih = torch.nn.Parameter(other.bias_ih.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
self.bias_hh = torch.nn.Parameter(other.bias_hh.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
def extra_repr(self):
s = '{input_size}, {hidden_size}'
if 'bias' in self.__dict__ and self.bias is not True:
s += ', bias={bias}'
if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
s += ', nonlinearity={nonlinearity}'
return s.format(**self.__dict__)
@torch.jit.script_method
def check_forward_input(self, input):
if input.size(1) != self.input_size:
raise RuntimeError(
"input has inconsistent input_size: got {}, expected {}".format(
input.size(1), self.input_size))
@torch.jit.script_method
def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None:
if input.size(0) != hx.size(0):
raise RuntimeError(
"Input batch size {} doesn't match hidden{} batch size {}".format(
input.size(0), hidden_label, hx.size(0)))
if hx.size(1) != self.hidden_size:
raise RuntimeError(
"hidden{} has inconsistent hidden_size: got {}, expected {}".format(
hidden_label, hx.size(1), self.hidden_size))
# TODO: for some reason weak_script_method causes a destruction of the
# module to occur, which in turn frees the packed_ih object via its DataPtr
# deleter. This is bizarre and should probably get fixed.
# @torch._jit_internal.weak_script_method
@torch.jit.script_method
def _unpack(self):
self.packed_ih.set_(torch.fbgemm_pack_quantized_matrix(self.weight_ih))
self.packed_hh.set_(torch.fbgemm_pack_quantized_matrix(self.weight_hh))
# @torch._jit_internal.weak_script_method
@torch.jit.script_method
def _pack(self):
self.packed_ih.set_(
torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
self.packed_hh.set_(
torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
class QuantizedRNNCell(QuantizedRNNCellBase):
__constants__ = ['input_size', 'hidden_size', 'bias', 'scale_hh', 'scale_ih',
'zero_point_ih', 'zero_point_hh', 'nonlinearity']
def __init__(self, other):
super(QuantizedRNNCell, self).__init__(other)
warnings.warn(
"torch.jit.QuantizedRNNCell is deprecated and will be removed in an upcoming "
"PyTorch release. Please use the torch.nn.quantized.dynamic.RNNCell instead.")
self.nonlinearity = other.nonlinearity
@torch.jit.script_method
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
self.check_forward_input(input)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
self.check_forward_hidden(input, hx, '')
if self.nonlinearity == "tanh":
ret = _VF.quantized_rnn_tanh_cell(
input, hx, self.weight_ih, self.weight_hh, self.bias_ih,
self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih,
self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih,
self.zero_point_hh
)
elif self.nonlinearity == "relu":
ret = _VF.quantized_rnn_relu_cell(
input, hx, self.weight_ih, self.weight_hh, self.bias_ih,
self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih,
self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih,
self.zero_point_hh
)
else:
ret = input # TODO: remove when jit supports exception flow
raise RuntimeError(
"Unknown nonlinearity: {}".format(self.nonlinearity))
return ret
class QuantizedLSTMCell(QuantizedRNNCellBase):
def __init__(self, other):
super(QuantizedLSTMCell, self).__init__(other)
warnings.warn(
"torch.jit.QuantizedLSTMCell is deprecated and will be removed in an upcoming "
"PyTorch release. Please use the torch.nn.quantized.dynamic.LSTMCell instead.")
@torch.jit.script_method
def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
self.check_forward_input(input)
if hx is None:
zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
hx = (zeros, zeros)
self.check_forward_hidden(input, hx[0], '[0]')
self.check_forward_hidden(input, hx[1], '[1]')
return _VF.quantized_lstm_cell(
input, hx, self.weight_ih, self.weight_hh, self.bias_ih,
self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih,
self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih,
self.zero_point_hh
)
class QuantizedGRUCell(QuantizedRNNCellBase):
def __init__(self, other):
super(QuantizedGRUCell, self).__init__(other)
warnings.warn(
"torch.jit.QuantizedGRUCell is deprecated and will be removed in an upcoming "
"PyTorch release. Please use the torch.nn.quantized.dynamic.GRUCell instead.")
@torch.jit.script_method
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
self.check_forward_input(input)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
self.check_forward_hidden(input, hx, '')
return _VF.quantized_gru_cell(
input, hx, self.weight_ih, self.weight_hh, self.bias_ih,
self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih,
self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih,
self.zero_point_hh
)
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
return tensor.index_select(dim, permutation)
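# Illustrative sketch (not part of the original file): apply_permutation simply
# reorders the batch dimension, e.g. to restore the original ordering of a
# sorted PackedSequence batch.
def _example_apply_permutation():
    hx = torch.arange(6.0).reshape(1, 3, 2)  # (num_layers, batch, hidden)
    perm = torch.tensor([2, 0, 1])
    return apply_permutation(hx, perm, dim=1)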
class QuantizedRNNBase(torch.jit.ScriptModule):
__constants__ = ['mode', 'input_size', 'hidden_size', 'num_layers', 'bias',
'batch_first', 'dropout', 'bidirectional', 'dtype']
def __init__(self, other, dtype=torch.int8):
super(QuantizedRNNBase, self).__init__()
warnings.warn(
"torch.jit.QuantizedRNNBase is deprecated and will be removed in an upcoming "
"PyTorch release. Please use the torch.nn.quantized.dynamic instead.")
self.mode = other.mode
self.input_size = other.input_size
self.hidden_size = other.hidden_size
self.num_layers = other.num_layers
self.bias = other.bias
self.batch_first = other.batch_first
if self.mode != 'GRU':
assert not self.batch_first
self.dropout = other.dropout
self.bidirectional = other.bidirectional
num_directions = 2 if self.bidirectional else 1
self.dtype = dtype
assert self.bias
# TODO: support more than just LSTM
if self.mode != 'LSTM' and self.mode != 'GRU':
raise RuntimeError('Only LSTM or GRU is supported for QuantizedRNN')
if dtype != torch.int8 and dtype != torch.float16:
raise RuntimeError('Unsupported dtype: {}'.format(dtype))
self.all_weights = []
for layer in range(self.num_layers):
for direction in range(num_directions):
layer_input_size = self.input_size if layer == 0 else self.hidden_size * num_directions
suffix = '_reverse' if direction == 1 else ''
def get_weight_bias(ihhh):
weight_name = 'weight_{}_l{}{}'.format(ihhh, layer, suffix)
bias_name = 'bias_{}_l{}{}'.format(ihhh, layer, suffix)
weight = getattr(other, weight_name)
bias = getattr(other, bias_name)
return weight, bias
weight_ih, bias_ih = get_weight_bias('ih')
weight_hh, bias_hh = get_weight_bias('hh')
if dtype == torch.int8:
cell_params = torch.ops.quantized.make_quantized_cell_params(
weight_ih, weight_hh, bias_ih, bias_hh)
else:
packed_ih = torch.ops.quantized.linear_prepack_fp16(
weight_ih.float(), bias_ih)
packed_hh = torch.ops.quantized.linear_prepack_fp16(
weight_hh.float(), bias_hh)
cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(
packed_ih, packed_hh)
setattr(self, 'cell_params_{}_{}'.format(layer, suffix), cell_params)
self.all_weights.append(cell_params)
@torch.jit.script_method
def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
expected_input_dim = 2 if batch_sizes is not None else 3
if input.dim() != expected_input_dim:
raise RuntimeError(
'input must have {} dimensions, got {}'.format(
expected_input_dim, input.dim()))
if self.input_size != input.size(-1):
raise RuntimeError(
'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
self.input_size, input.size(-1)))
@torch.jit.script_method
def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
if batch_sizes is not None:
mini_batch = int(batch_sizes[0])
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.hidden_size)
return expected_hidden_size
@torch.jit.script_method
def check_hidden_size(self, hx: Tensor, expected_hidden_size: Tuple[int, int, int],
msg: str = 'Expected hidden size {}, got {}') -> None:
if hx.size() != expected_hidden_size:
raise RuntimeError(msg.format(expected_hidden_size, list(hx.size())))
@torch.jit.script_method
def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None:
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
self.check_hidden_size(hidden, expected_hidden_size, msg='Expected hidden size {}, got {}')
@torch.jit.script_method
def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]) -> Tensor:
if permutation is None:
return hx
return apply_permutation(hx, permutation)
class QuantizedLSTM(QuantizedRNNBase):
__overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
def __init__(self, other, dtype):
super(QuantizedLSTM, self).__init__(other, dtype)
warnings.warn(
"torch.jit.QuantizedLSTM is deprecated and will be removed in an upcoming "
"PyTorch release. Please use the torch.nn.quantized.dynamic.LSTM instead.")
@torch.jit.script_method
def forward_impl(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]], batch_sizes: Optional[Tensor],
max_batch_size: int, sorted_indices: Optional[Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
if hx is None:
num_directions = 2 if self.bidirectional else 1
zeros = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
hx = (zeros, zeros)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
assert batch_sizes is None
result = torch.quantized_lstm(input, hx, self.all_weights, self.bias, self.num_layers,
float(self.dropout), self.training, self.bidirectional,
self.batch_first, dtype=self.dtype, use_dynamic=False)
output = result[0]
hidden = result[1:]
return output, hidden
@torch.jit.script_method
def forward_tensor(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
@torch.jit.script_method
def forward_packed(self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]:
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
@torch.jit.script_method
def permute_hidden(self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor]) -> Tuple[Tensor, Tensor]:
if permutation is None:
return hx
return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation)
@torch.jit.script_method
def check_forward_args(self, input: Tensor, hidden: Tuple[Tensor, Tensor], batch_sizes: Optional[Tensor]) -> None:
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
self.check_hidden_size(hidden[0], expected_hidden_size,
'Expected hidden[0] size {}, got {}')
self.check_hidden_size(hidden[1], expected_hidden_size,
'Expected hidden[1] size {}, got {}')
def forward(self, input, hx=None):
if isinstance(input, PackedSequence):
return self.forward_packed(input, hx)
else:
return self.forward_tensor(input, hx)
class QuantizedGRU(QuantizedRNNBase):
__overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"torch.jit.QuantizedGRU is deprecated and will be removed in an upcoming "
"PyTorch release. Please use the torch.nn.quantized.dynamic.GRU instead.")
@torch.jit.script_method
def forward_impl(self, input: Tensor, hx: Optional[Tensor], batch_sizes: Optional[Tensor], max_batch_size: int,
sorted_indices: Optional[Tensor]) -> Tuple[Tensor, Tensor]:
if hx is None:
num_directions = 2 if self.bidirectional else 1
hx = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
if batch_sizes is None:
result = torch.quantized_gru(input, hx, self.all_weights, self.bias, self.num_layers,
float(self.dropout), self.training, self.bidirectional,
self.batch_first)
else:
result = torch.quantized_gru(input, batch_sizes, hx, self.all_weights, self.bias, self.num_layers,
float(self.dropout), self.training, self.bidirectional)
output = result[0]
hidden = result[1]
return output, hidden
@torch.jit.script_method
def forward_tensor(self, input: Tensor, hx: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
@torch.jit.script_method
def forward_packed(self, input: PackedSequence, hx: Optional[Tensor] = None) -> Tuple[PackedSequence, Tensor]:
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
def forward(self, input, hx=None):
if isinstance(input, PackedSequence):
return self.forward_packed(input, hx)
else:
return self.forward_tensor(input, hx)
def quantize_rnn_cell_modules(module):
warnings.warn("quantize_rnn_cell_modules function has been deprecated. "
"Please use torch.ao.quantization.quantize_dynamic API instead.")
reassign = {}
for name, mod in module.named_modules():
if mod is module:
continue
new_mod = quantize_rnn_cell_modules(mod)
if new_mod is not mod:
reassign[name] = new_mod
for name, mod in reassign.items():
setattr(module, name, mod)
if isinstance(module, torch.nn.LSTMCell):
return QuantizedLSTMCell(module)
if isinstance(module, torch.nn.GRUCell):
return QuantizedGRUCell(module)
if isinstance(module, torch.nn.RNNCell):
return QuantizedRNNCell(module)
return module
def quantize_linear_modules(module, dtype=torch.int8):
warnings.warn("quantize_linear_modules function has been deprecated. "
"Please use torch.ao.quantization.quantize_dynamic API instead.")
reassign = {}
for name, mod in module.named_modules():
if mod is module:
continue
new_mod = quantize_linear_modules(mod, dtype)
if new_mod is not mod:
reassign[name] = new_mod
for name, mod in reassign.items():
setattr(module, name, mod)
if isinstance(module, torch.nn.Linear):
if dtype == torch.int8:
return QuantizedLinear(module)
elif dtype == torch.float16:
return QuantizedLinearFP16(module)
else:
raise RuntimeError(
"Unsupported dtype: {}".format(dtype))
return module
def quantize_rnn_modules(module, dtype=torch.int8):
warnings.warn("quantize_rnn_modules function has been deprecated. "
"Please use torch.ao.quantization.quantize_dynamic API instead.")
reassign = {}
for name, mod in module.named_modules():
if mod is module:
continue
new_mod = quantize_rnn_modules(mod, dtype)
if new_mod is not mod:
reassign[name] = new_mod
for name, mod in reassign.items():
setattr(module, name, mod)
if isinstance(module, torch.nn.LSTM):
if dtype != torch.int8 and dtype != torch.float16:
raise RuntimeError("Unsupported dtype: {}".format(dtype))
return QuantizedLSTM(module, dtype)
if isinstance(module, torch.nn.GRU):
return QuantizedGRU(module)
return module
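# Usage sketch (illustrative, not part of the original file): each of the
# quantize_*_modules helpers walks the module tree, swaps eligible submodules
# for their Quantized* counterparts, and returns the (possibly replaced) root.
#   >>> lstm = torch.nn.LSTM(4, 8)                        # hypothetical module
#   >>> q = quantize_rnn_modules(lstm, dtype=torch.float16)
#   >>> type(q).__name__
#   'QuantizedLSTM'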
| pytorch-master | torch/jit/quantized.py |
"""TorchScript
This module contains functionality to support the JIT's scripting frontend, notably:
- torch.jit.script
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import functools
import collections
import enum
import inspect
import copy
import pickle
import warnings
from typing import Any, Dict, List, Set, Tuple, Union, Callable
import torch
import torch._jit_internal as _jit_internal
from torch.utils import set_module
from torch.jit._recursive import ScriptMethodStub, wrap_cpp_module, infer_methods_to_compile, _compile_and_register_class
from torch.nn import Module
from torch.jit._state import _enabled
from torch.jit._builtins import _register_builtin
from torch._six import with_metaclass
from torch.jit.frontend import get_jit_def, get_default_args, get_jit_class_def
from torch._jit_internal import _qualified_name
from torch.jit._fuser import _graph_for, _script_method_graph_for
from torch.jit._state import (
_try_get_jit_cached_function,
_try_get_jit_cached_overloads,
_set_jit_function_cache,
_set_jit_overload_cache,
)
from torch.overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic)
from torch.package import PackageExporter, PackageImporter
from ._serialization import validate_map_location
from torch.jit._monkeytype_config import (
monkeytype_trace,
    JitTypeTraceConfig,
JitTypeTraceStore
)
from torch._classes import classes
type_trace_db = JitTypeTraceStore() # DB to hold all call traces from MonkeyType
torch._C.ScriptMethod.graph_for = _script_method_graph_for # type: ignore[attr-defined]
torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined]
ScriptFunction = torch._C.ScriptFunction
ScriptFunction.__doc__ = """
Functionally equivalent to a :class:`ScriptModule`, but represents a single
function and does not have any attributes or Parameters.
"""
set_module(ScriptFunction, "torch.jit")
# Throws an error if a jit function is pickled.
# Helps to avoid Python crashes for Python versions 3.9.5 + when protocol 0 or 1 is given as an argument.
def _reduce(cls):
raise pickle.PickleError("ScriptFunction cannot be pickled")
ScriptFunction.__reduce__ = _reduce # type: ignore[assignment]
if _enabled:
Attribute = collections.namedtuple("Attribute", ["value", "type"])
else:
def Attribute(value, type): # type: ignore[no-redef]
return value
Attribute.__doc__ = """
    This is a pass-through function that returns `value`, mostly
    used to indicate to the TorchScript compiler that the left-hand side
    expression is a class instance attribute with the type `type`. Note that
    `torch.jit.Attribute` should only be used in the `__init__` method of `jit.ScriptModule`
    subclasses.
Though TorchScript can infer correct type for most Python expressions, there are some cases where
type inference can be wrong, including:
- Empty containers like `[]` and `{}`, which TorchScript assumes to be container of `Tensor`
- Optional types like `Optional[T]` but assigned a valid value of type `T`, TorchScript would assume
it is type `T` rather than `Optional[T]`
In eager mode, it is simply a pass-through function that returns `value`
without other implications.
Example:
.. testcode::
import torch
from typing import Dict
class AttributeModule(torch.jit.ScriptModule):
def __init__(self):
super(AttributeModule, self).__init__()
self.foo = torch.jit.Attribute(0.1, float)
# we should be able to use self.foo as a float here
assert 0.0 < self.foo
self.names_ages = torch.jit.Attribute({}, Dict[str, int])
self.names_ages["someone"] = 20
assert isinstance(self.names_ages["someone"], int)
m = AttributeModule()
# m will contain two attributes
# 1. foo of type float
# 2. names_ages of type Dict[str, int]
.. testcleanup::
del AttributeModule
del m
    Note: it's now preferred to use type annotations instead of `torch.jit.Attribute`:
.. testcode::
import torch
from typing import Dict
class AttributeModule(torch.nn.Module):
names: Dict[str, int]
def __init__(self):
super(AttributeModule, self).__init__()
self.names = {}
m = AttributeModule()
.. testcleanup::
del AttributeModule
del m
Args:
value: An initial value to be assigned to attribute.
type: A Python type
Returns:
Returns `value`
"""
def _get_type_trace_db():
# This is a private API. Use of this for external purposes is discouraged.
return type_trace_db
# Gets a function from the name of a method on a type
def _get_function_from_type(cls, name):
return getattr(cls, name, None)
# ScriptClasses must be new-style classes because we construct them using their
# __new__ method.
def _is_new_style_class(cls):
if hasattr(cls, "__class__"):
return "__dict__" in dir(cls) or hasattr(cls, "__slots__")
# These OrderedDictWrapper classes replace the actual OrderedDicts in
# module with versions that get/set properties inside of Module.
# This allows us to reuse most of nn.Module while still storing the
# data in C++.
# Each OrderedDict needs to support:
# x not in view
# x in view
# view[name] = ...
# view.values()
# del view[name]
# view.items()
# view.keys()
# len(view)
class OrderedDictWrapper(object):
def __init__(self, _c):
self._c = _c
def keys(self):
return [k for k, v in self.items()]
def values(self):
return [v for k, v in self.items()]
def __len__(self):
return len(self.values())
def __delitem__(self, k):
raise RuntimeError("cannot delete methods or parameters of a script module")
def items(self):
return self._c.items()
def __setitem__(self, k, v):
if k not in self:
raise RuntimeError(
"Can't add a new parameter after ScriptModule construction."
" Tried to add '{}".format(k)
)
self._c.setattr(k, v)
def __contains__(self, k):
return self._c.contains(k)
def __getitem__(self, k):
if k not in self:
raise KeyError(k)
return self._c.getattr(k)
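# Illustrative note (not in the original source): reads and writes on the
# wrapper above go straight to the C++ module, e.g. view["weight"] calls
# self._c.getattr("weight") and view["weight"] = t calls self._c.setattr(...),
# so the Python-visible state and the C++ state cannot diverge.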
class OrderedModuleDict(OrderedDictWrapper):
def __init__(self, module, python_dict):
super(OrderedModuleDict, self).__init__(torch._C.ModuleDict(module))
# contains _both_ script modules and non-script python-only modules
# because script modules are subclassed in python and the
# C++ Module class will not hold references to them,
# to ensure that you always get the same python value here
# we store it in the python dict as well
self._python_modules = python_dict
def items(self):
r = self._python_modules.items()
return r
def __contains__(self, k):
return k in self._python_modules
def __setitem__(self, k, v):
# Cases where sub-module can be re-assigned after ScriptModule construction
# 1. If the attr is an module interface type, it's guaranteed that the module is
# not inlined in the graph, so it's safe to swap a new ScriptModule in.
        # 2. If the new value is a ScriptModule with the same JIT type, IR won't change
# and it's legit to swap a new module in.
# In these two cases we allow swapping a new scripted module and update the
# corresponding python module dict to keep sync.
# Note: the value to be swapped in has to be ScriptModule instead of nn.Module,
# otherwise it's illegal and we throw error.
if isinstance(v, ScriptModule):
self._c.setattr(k, v)
self._python_modules[k] = v
else:
raise RuntimeError(
"Cannot re-assign modules in a ScriptModule with non-scripted "
"module, tried to replace existing module '{}': {}".format(k, v)
)
def __getitem__(self, k):
return self._python_modules[k]
# For each user-defined class that subclasses ScriptModule, this meta-class:
# (1) finds all the methods annotated with @script_method in a ScriptModule and
# removes them from the class attributes
# (2) puts a wrapper around the class's __init__ method to recursively compile
# all of the script_methods with the module after the original __init__ has
# run. This has to occur after the user-defined __init__ so that submodules and
# parameters are initialized _before_ the script compiler resolve references to
# `self.param` or `self.module`.
class ScriptMeta(type):
def __init__(cls, name, bases, attrs): # noqa: B902
# Aggregate all the ScriptMethods and constants from superclasses
cls._methods: Dict[str, Any] = {}
cls._constants_set = set(getattr(cls, "__constants__", ()))
for base in reversed(bases):
for k, v in getattr(base, "_methods", {}).items():
cls._methods[k] = v
base_constants: Set = getattr(base, "_constants_set", set())
cls._constants_set = cls._constants_set.union(base_constants)
# find all the script methods of the current class
for k, v in sorted(attrs.items()):
if isinstance(v, ScriptMethodStub):
delattr(cls, k)
cls._methods[v.original_method.__name__] = v
if getattr(cls, "_disable_script_meta", False):
# We leave built-in ScriptModule types alone, since this metaclass
# is only for compiling user classes that inherit from
# ScriptModule.
return super(ScriptMeta, cls).__init__(name, bases, attrs)
original_init = getattr(cls, "__init__", lambda self: None)
@functools.wraps(original_init)
def init_then_script(self, *args, **kwargs):
num_methods = len(cls._methods)
original_init(self, *args, **kwargs)
added_methods_in_init = len(cls._methods) > num_methods
if type(self) == cls:
def make_stubs(module):
cls = type(module)
if hasattr(cls, "_methods"):
return [v for k, v in sorted(cls._methods.items())]
else:
return infer_methods_to_compile(module)
self.__dict__[
"_actual_script_module"
] = torch.jit._recursive.create_script_module(self, make_stubs, share_types=not added_methods_in_init)
# Delete the Python attributes that now shadow the ScriptModule
# ones, so that __getattr__ and __setattr__ will properly find
# the scripted versions.
concrete_type = self._actual_script_module._concrete_type
for name in concrete_type.get_attributes():
delattr(self, name)
for name, _ in concrete_type.get_modules():
delattr(self, name)
for name in ("_parameters", "_buffers", "_modules"):
delattr(self, name)
cls.__init__ = init_then_script # type: ignore[misc]
super(ScriptMeta, cls).__init__(name, bases, attrs)
class _CachedForward(object):
def __get__(self, obj, cls):
return self.__getattr__("forward") # type: ignore[attr-defined]
class ScriptWarning(Warning):
pass
def script_method(fn):
if not _enabled:
return fn
# NOTE: we need to traverse two frames here because the meta-class frame
# for ScriptModule will be present, as opposed to invoking @script on a
    # function or invoking define() on a CompilationUnit.
# The stack will look like:
#
# 0. createResolutionCallback()
# 1. script_method()
# 2. ScriptModule metaclass frame
# 3. Surrounding scope
#
# createResolutionCallback internally adds 1 to get us to the scope of this
# function (the calling function). Adding 2 gets us to the proper surrounding scope.
_rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=2)
ast = get_jit_def(fn, fn.__name__, self_name="ScriptModule")
return ScriptMethodStub(_rcb, ast, fn)
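# Usage sketch (illustrative): @script_method is applied to methods of a
# torch.jit.ScriptModule subclass; ScriptMeta collects the resulting stubs and
# compiles them once the user-defined __init__ has finished.
#   class MyScriptModule(torch.jit.ScriptModule):
#       @torch.jit.script_method
#       def forward(self, x):
#           return x + 1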
class ConstMap:
def __init__(self, const_mapping):
self.const_mapping = const_mapping
def __getattr__(self, attr):
return self.const_mapping[attr]
def unpackage_script_module(importer: PackageImporter, script_module_id: str) -> torch.nn.Module:
"""
Called by ``torch.package.PackageImporter``'s Pickler's ``persistent_load`` function.
Performs work of loading and returning a ScriptModule from a ``torch.package`` archive.
"""
if not isinstance(importer.zip_reader, torch._C.PyTorchFileReader):
raise RuntimeError(
"Loading ScriptObjects from a PackageImporter created from a "
"directory is not supported. Use a package archive file instead."
)
cu = torch._C.CompilationUnit()
cpp_module = torch._C._import_ir_module_from_package(
cu,
importer.zip_reader,
importer.storage_context,
validate_map_location(importer.last_map_location),
script_module_id,
)
return wrap_cpp_module(cpp_module)
if _enabled:
_magic_methods = [
"__iter__",
"__len__",
"__neg__",
"__mul__",
"__contains__",
"__add__",
"__sub__",
"__pow__",
"__truediv__",
"__mod__",
"__ne__",
"__eq__",
"__lt__",
"__gt__",
"__le__",
"__ge__",
"__and__",
"__or__",
"__xor__",
"__getitem__",
"__setitem__",
"__call__",
"__int__",
"__float__",
"__bool__",
"__str__",
"__enter__",
"__exit__",
]
class RecursiveScriptClass(object):
"""
An analogue of RecursiveScriptModule for regular objects that are not modules.
This class is a wrapper around a torch._C.ScriptObject that represents an instance
of a TorchScript class and allows it to be used in Python.
Attributes:
_c [torch._C.ScriptObject]: The C++ object to which attribute lookups and method
calls are forwarded.
_props [Dict[str, property]]: A dictionary of properties fetched from self._c and
            exposed on this wrapper.
"""
def __init__(self, cpp_class):
super(RecursiveScriptClass, self).__init__()
self.__dict__["_initializing"] = True
self._c = cpp_class
# Add wrapped object's properties to this class instance.
self._props = {prop.name: property(prop.getter, prop.setter) for prop in self._c._properties()}
self.__dict__["_initializing"] = False
def __getattr__(self, attr):
if "_initializing" in self.__dict__ and self.__dict__["_initializing"]:
return super(RecursiveScriptClass, self).__getattr__(attr) # type: ignore[misc]
if attr in self._props:
return self._props[attr].fget() # type: ignore[call-arg, misc]
return getattr(self._c, attr)
def __setattr__(self, attr, value):
if "_initializing" in self.__dict__ and self.__dict__["_initializing"]:
return super(RecursiveScriptClass, self).__setattr__(attr, value)
if attr in self._props:
return self._props[attr].fset(value) # type: ignore[call-arg, misc]
setattr(self._c, attr, value)
# Delegate calls to magic methods like __len__ to the C++ module backing the
# RecursiveScriptClass.
def forward_magic_method(self, method_name, *args, **kwargs):
if not self._c._has_method(method_name):
raise TypeError()
self_method = self.__getattr__(method_name)
return self_method(*args, **kwargs)
def __getstate__(self):
raise pickle.PickleError("ScriptClasses cannot be pickled")
def __iadd__(self, other):
if self._c._has_method("__iadd__"):
return self.forward_magic_method("__iadd__", other)
else:
return self.forward_magic_method("__add__", other)
    for method_name in _magic_methods:
        # Bind method_name eagerly via a keyword-only default; a plain closure
        # would late-bind and leave every generated magic method forwarding the
        # last name in _magic_methods.
        def method_template(self, *args, _method_name=method_name, **kwargs):
            return self.forward_magic_method(_method_name, *args, **kwargs)
        setattr(RecursiveScriptClass, method_name, method_template)
# this is a Python 'non-data descriptor' that causes the first access
# to ScriptModule's forward to look up the forward method and stash
# it in the objects dict. Due to the standard rules for attribute lookup,
# subsequent lookups will just directly return the previously looked up method.
# This is necessary because nn.Module defines forward as a method. If we
# did nothing, __getattr__ would not be called. Instead we'd get nn.Module.forward
# which always throws an exception.
class ScriptModule(with_metaclass(ScriptMeta, Module)): # type: ignore[misc]
r"""
A wrapper around C++ ``torch::jit::Module``. ``ScriptModule``\s
contain methods, attributes, parameters, and
constants. These can be accessed the same way as on a normal ``nn.Module``.
"""
__jit_unused_properties__ = ['code', 'code_with_constants', 'graph', 'inlined_graph', 'original_name']
def __init__(self):
super(ScriptModule, self).__init__()
forward = _CachedForward()
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super(ScriptModule, self).__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
# Unwrap torch.jit.Attribute into a regular setattr + record
# the provided type in __annotations__.
#
# This ensures that if we use the attr again in `__init__`, it
# will look like the actual value, not an instance of Attribute.
if isinstance(value, Attribute):
# NB: Ensure that we set __annotations__ on the specific
# class in question, and not on a superclass (which would
# be wrong wrong wrong!).
# See also https://github.com/pytorch/pytorch/issues/39463
if "__annotations__" not in self.__class__.__dict__:
self.__class__.__annotations__ = {}
self.__annotations__[attr] = value.type
value = value.value
return super(ScriptModule, self).__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def define(self, src):
if "_actual_script_module" in self.__dict__:
# If we have completed initialization, just defer to the
# backing RecursiveScriptModule to eagerly compile the provided
# source.
return self._actual_script_module.define(src)
# Otherwise, we are still in the object's __init__.
# In that case, add `src` as a stub to be compiled.
#
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
ast = torch._C._parse_source_def(src)
self._methods[ast.name().name] = ScriptMethodStub(rcb, ast, None)
def _replicate_for_data_parallel(self):
return self._actual_script_module._replicate_for_data_parallel()
def __reduce_package__(self, exporter: PackageExporter):
"""
Called by ``torch.package.PackageExporter``'s Pickler's ``persistent_id`` when
saving TorchScript objects. Performs act of saving a ScriptModule inside of
a ``torch.package`` archive.
Returns method to load the ScriptModule from a ``torch.package.PackageImporter``'s
Pickler's ``persistent_load`` function.
"""
script_module_id = exporter.get_unique_id()
exporter.script_module_serializer.serialize(self._c, int(script_module_id))
return (unpackage_script_module, (script_module_id,))
class RecursiveScriptModule(ScriptModule):
# XXX: RecursiveScriptModule inherits from ScriptModule for the sole
# reason that it retains the existing isinstance(ScriptModule)
# behavior.
r"""
The core data structure in TorchScript is the ``ScriptModule``. It is an
analogue of torch's ``nn.Module`` and represents an entire model as a tree of
submodules. Like normal modules, each individual module in a ``ScriptModule`` can
have submodules, parameters, and methods. In ``nn.Module``\s methods are implemented
as Python functions, but in ``ScriptModule``\s methods are implemented as
TorchScript functions, a statically-typed subset of Python that contains all
of PyTorch's built-in Tensor operations. This difference allows your
``ScriptModule``\s code to run without the need for a Python interpreter.
``ScriptModule``\s should not be created manually, instead use
either :func:`tracing <torch.jit.trace>` or :func:`scripting <torch.jit.script>`.
Tracing and scripting can be applied incrementally and :ref:`composed as necessary <Types>`.
* Tracing records the tensor operations as executed with a set of example inputs and uses these
operations to construct a computation graph. You can use the full dynamic behavior of Python with tracing,
but values other than Tensors and control flow aren't captured in the graph.
* Scripting inspects the Python code of the model
and compiles it to TorchScript. Scripting allows the use of many `types`_ of values and supports dynamic control flow.
Many, but not all features of Python are supported by the compiler, so changes to the source code may be necessary.
"""
_disable_script_meta = True
def __init__(self, cpp_module):
self.__dict__["_initializing"] = True
self._c = cpp_module
super(RecursiveScriptModule, self).__init__()
# Delete the 'training' attribute set up by `Module.__init__`. It
# will get set on the underlying cpp module, so we delete it here
# to avoid this version shadowing the cpp module version.
delattr(self, "training")
@staticmethod
def _construct(cpp_module, init_fn):
"""
Construct a RecursiveScriptModule that's ready for use. PyTorch
code should use this to construct a RecursiveScriptModule instead
            of calling `__init__` directly, as it makes sure the
object is properly finalized (and in the future, we may take
control of how the RecursiveScriptModule instance is created).
Args:
cpp_module: The C++ Module that will hold the actual state of
this RecursiveScriptModule instance.
init_fn: Lambda that initializes the RecursiveScriptModule passed to it.
"""
script_module = RecursiveScriptModule(cpp_module)
init_fn(script_module)
# Finalize the ScriptModule: replace the nn.Module state with our
# custom implementations and flip the _initializing bit.
RecursiveScriptModule._finalize_scriptmodule(script_module)
return script_module
@staticmethod
def _finalize_scriptmodule(script_module):
script_module._parameters = OrderedDictWrapper(
torch._C.ParameterDict(script_module._c)
)
script_module._buffers = OrderedDictWrapper(
torch._C.BufferDict(script_module._c)
)
script_module._modules = OrderedModuleDict(
script_module._c, script_module._modules
)
script_module._initializing = False
def _reconstruct(self, cpp_module):
"""
Re-construct an instance of RecursiveScriptModule using an instance of a C++ module.
Args:
cpp_module: The C++ module that this RecursiveScriptModule will be rebuilt around.
"""
self.__init__(cpp_module) # type: ignore[misc]
# Copy the concrete type from the C++ module to this ScriptModule.
self._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
self._c._type()
)
# Copy submodules from the C++ module to this ScriptModule.
modules = {}
for name, cpp_module in torch._C.ModuleDict(self._c).items():
modules[name] = wrap_cpp_module(cpp_module)
self._modules = OrderedModuleDict(self._c, modules)
# Copy parameters and buffers.
self._parameters = OrderedDictWrapper(torch._C.ParameterDict(self._c))
self._buffers = OrderedDictWrapper(torch._C.BufferDict(self._c))
# Get rid of the functions from the old C++ module.
self.__dict__ = {
k: v
for k, v in self.__dict__.items()
if not isinstance(v, torch._C.ScriptMethod)
}
self.__dict__["_initializing"] = False
@property
def graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. See :ref:`interpreting-graphs` for details.
"""
return self._c._get_method("forward").graph
@property
def inlined_graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. This graph will be preprocessed to inline all function and method calls.
See :ref:`interpreting-graphs` for details.
"""
return self.forward.inlined_graph
@property
def code(self):
r"""
Returns a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See
:ref:`inspecting-code` for details.
"""
return self.forward.code
@property
def code_with_constants(self):
r"""
Returns a tuple of:
[0] a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See `code`.
[1] a ConstMap following the CONSTANT.cN format of the output in [0].
The indices in the [0] output are keys to the underlying constant's values.
See :ref:`inspecting-code` for details.
"""
r = self.forward.code_with_constants
return (r[0], ConstMap(r[1]))
def save(self, f, **kwargs):
r"""
save(f, _extra_files={})
See :func:`torch.jit.save <torch.jit.save>` for details.
"""
return self._c.save(str(f), **kwargs)
def _save_for_lite_interpreter(self, *args, **kwargs):
r"""
_save_for_lite_interpreter(f)
Add (or update) the bytecode session to the script model. The updated model is used
in lite interpreter for mobile applications.
Args:
f: a string containing a file name.
_extra_files: Map from filename to contents which will be stored as part of 'f'.
"""
return self._c._save_for_mobile(*args, **kwargs)
def _save_to_buffer_for_lite_interpreter(self, *args, **kwargs):
return self._c._save_to_buffer_for_mobile(*args, **kwargs)
def save_to_buffer(self, *args, **kwargs):
return self._c.save_to_buffer(*args, **kwargs)
def get_debug_state(self, *args, **kwargs):
return self._c.get_debug_state()
def extra_repr(self):
return "original_name={}".format(self.original_name)
def graph_for(self, *args, **kwargs):
return self.forward.graph_for(self, *args, **kwargs)
@property
def original_name(self):
if type(self) == str(self._c._type().name()):
return ""
return str(self._c._type().name())
def define(self, src):
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
self._c._define(self._concrete_type, src, rcb)
def __getattr__(self, attr):
if "_initializing" not in self.__dict__:
raise RuntimeError(
"ScriptModule has not been initialized, did you forget to call super's init?"
)
if self._initializing:
return super(RecursiveScriptModule, self).__getattr__(attr)
# _modules check is before hasattr since modules are included as attributes in _c,
# but we want to get the python wrapper from _modules instead of the raw _c object.
if attr in self._modules:
return self._modules[attr]
elif self._c.hasattr(attr):
return self._c.getattr(attr)
elif self._c._has_method(attr):
script_method = self._c._get_method(attr)
# cache method so future calls do not go through __getattr__
# to improve invocation performance
self.__dict__[attr] = script_method
return script_method
return super(RecursiveScriptModule, self).__getattr__(attr)
def __setattr__(self, attr, value):
if self._initializing:
return super(RecursiveScriptModule, self).__setattr__(attr, value)
if attr in self._modules:
self._modules[attr] = value
elif self._c.hasattr(attr):
self._c.setattr(attr, value)
elif (
hasattr(self, "_concrete_type")
and attr in self._concrete_type.get_constants().keys()
):
# TODO: we don't have _concrete_type set after load(), and in general we lose constant information.
# We should encode constants as class type attributes (or something) so it persists across save/load.
raise AttributeError(
"Cannot mutate TorchScript constant value: '{}'. Value: '{}'".format(
attr, value
)
)
else:
# We allow setting Python attributes on the ScriptModule, for
# when people want to stash some convenience info on it.
# TODO: it's possible that the following is confusing:
# s = torch.jit.script(...)
# s.python_attr = ...
# s.save() <--- this doesn't have `python_attr`
# It's fairly trivial to save enough info to warn in this case.
return super(RecursiveScriptModule, self).__setattr__(attr, value)
def __copy__(self):
return torch.jit._recursive.wrap_cpp_module(copy.copy(self._c))
def __deepcopy__(self, memo):
return torch.jit._recursive.wrap_cpp_module(copy.deepcopy(self._c, memo))
# Python magic methods do method lookups on an object's class type, instead of looking up
# the method defines on the class instance. In order to continue to expose the magic methods
# of builtin-containers (ModuleList, Sequential, ModuleDict) to Python, we
# define magic methods here as a shim to the correct attribute.
def forward_magic_method(self, method_name, *args, **kwargs):
self_method = getattr(self, method_name)
if getattr(self_method, "__func__", None) == getattr(
RecursiveScriptModule, method_name
):
raise NotImplementedError()
return self_method(*args, **kwargs)
def __iter__(self):
return self.forward_magic_method("__iter__")
def __getitem__(self, idx):
return self.forward_magic_method("__getitem__", idx)
def __len__(self):
return self.forward_magic_method("__len__")
def __contains__(self, key):
return self.forward_magic_method("__contains__", key)
# dir is defined by the base nn.Module, so instead of throwing if
# it is not overridden, we call into the nn.Module __dir__ method
def __dir__(self):
self_method = self.__dir__
if self_method.__func__ == _get_function_from_type( # type: ignore[attr-defined]
RecursiveScriptModule, "__dir__"
):
return super(RecursiveScriptModule, self).__dir__()
return self_method()
# to resolve bool(value), Python looks if __bool__ is defined then __iter__
# is defined then returns true for classes. Since __iter__() on this
# class throws if it isn't overridden, we define __bool__ to preserve default behavior
def __bool__(self):
self_method = self.__bool__
if self_method.__func__ == _get_function_from_type( # type: ignore[attr-defined]
RecursiveScriptModule, "__bool__"
):
return True
return self_method()
def _replicate_for_data_parallel(self):
# we have to initialize ScriptModule properly so that
# it works with pybind11
def init_fn(script_module):
# Don't do anything here, we'll initialize the ScriptModule below
return
return RecursiveScriptModule._construct(
self._c._replicate_for_data_parallel(), init_fn
)
# Need to copy all RecursiveScriptModule methods to ScriptModule.
#
# This is because `super(MyScriptModule, self).foo()` does not use
# `__getattr__` to look up `foo`. So we need to make each method available on
# the ScriptModule manually.
for name, item in RecursiveScriptModule.__dict__.items():
if not callable(item) and not isinstance(item, property):
continue
if name.startswith("__") or hasattr(ScriptModule, name):
continue
# We can copy over the implementation wholesale because besides the
# `super()` thing above, ScriptModule behaves exactly like
# RecursiveScriptModule
setattr(ScriptModule, name, item)
def _get_methods(cls):
import inspect
# In Python 3 unbound methods are functions, but in Python 2 they are methods
return inspect.getmembers(
cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)
)
_compiled_methods_allowlist = {
"forward",
"register_buffer",
"register_parameter",
"register_module",
"add_module",
"_apply",
"apply",
"cuda",
"cpu",
"to",
"type",
"float",
"double",
"half",
"state_dict",
"_save_to_state_dict",
"load_state_dict",
"_load_from_state_dict",
"_named_members",
"parameters",
"named_parameters",
"buffers",
"named_buffers",
"children",
"named_children",
"modules",
"named_modules",
"zero_grad",
"share_memory",
"_get_name",
"extra_repr",
"_slow_forward",
"_tracing_name",
"eval",
"train",
"get_extra_state",
"set_extra_state"
}
def _make_fail(name):
def fail(self, *args, **kwargs):
raise RuntimeError(name + " is not supported on ScriptModules")
return fail
for name, method in _get_methods(torch.nn.Module):
if name.startswith("__"):
continue
if (
name not in RecursiveScriptModule.__dict__
and name not in _compiled_methods_allowlist
):
setattr(RecursiveScriptModule, method.__name__, _make_fail(name))
else:
# TODO MAKE SURE THAT DISABLING WORKS
class RecursiveScriptClass(object): # type: ignore[no-redef]
def __init__(self):
super().__init__()
class ScriptModule(torch.nn.Module): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
class RecursiveScriptModule(ScriptModule): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
def call_prepare_scriptable_func_impl(obj, memo):
if not isinstance(obj, torch.nn.Module):
return obj
obj_id = id(obj)
# If obj_id is in memo, obj has already been prepared or is being
# prepared in another call up the stack.
if obj_id in memo:
return memo[id(obj)]
obj = obj.__prepare_scriptable__() if hasattr(obj, '__prepare_scriptable__') else obj # type: ignore[operator]
# Record obj in memo to avoid infinite recursion in the case of cycles in the module
# hierarchy when recursing below.
memo[obj_id] = obj
new_obj_dict = {}
for name, sub_module in obj.__dict__.items():
if name == '_modules':
for k, v in sub_module.items():
sub_module[k] = call_prepare_scriptable_func_impl(v, memo)
new_obj_dict[name] = sub_module
elif isinstance(sub_module, torch.nn.Module) and not isinstance(sub_module, ScriptModule):
new_obj_dict[name] = call_prepare_scriptable_func_impl(sub_module, memo)
else:
new_obj_dict[name] = sub_module
for k, v in new_obj_dict.items():
        obj.__dict__[k] = v
return obj
def call_prepare_scriptable_func(obj):
memo: Dict[int, torch.nn.Module] = {}
return call_prepare_scriptable_func_impl(obj, memo)
def create_script_dict(obj):
"""
Create a ``torch._C.ScriptDict`` instance with the data from ``obj``.
Args:
obj (dict): The Python dictionary that is used to initialize the ``ScriptDict``
returned by this function.
Returns:
An instance of ``torch._C.ScriptDict`` that has the same data as ``obj``
and can be passed between Python and TorchScript with reference semantics and
zero copy overhead.
"""
return torch._C.ScriptDict(obj) # type: ignore[attr-defined]
def create_script_list(obj, type_hint=None):
"""
Create a ``torch._C.ScriptList`` instance with the data from ``obj``.
Args:
obj (dict): The Python list that is used to initialize the ``ScriptList``
returned by this function.
Returns:
An instance of ``torch._C.ScriptList`` that has the same data as ``obj``
and can be passed between Python and TorchScript with reference semantics and
zero copy overhead.
"""
return torch._C.ScriptList(obj) # type: ignore[attr-defined]
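# Usage sketch (illustrative): both helpers wrap the Python container in a
# TorchScript-owned container, so scripted and eager code operate on the same
# underlying data (reference semantics, no copying), e.g.:
#   >>> sd = create_script_dict({"a": torch.ones(1)})
#   >>> sl = create_script_list([1, 2, 3])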
def script(obj, optimize=None, _frames_up=0, _rcb=None,
example_inputs: Union[List[Tuple], Dict[Callable, List[Tuple]], None] = None):
r"""
Scripting a function or ``nn.Module`` will inspect the source code, compile
it as TorchScript code using the TorchScript compiler, and return a :class:`ScriptModule` or
:class:`ScriptFunction`. TorchScript itself is a subset of the Python language, so not all
features in Python work, but we provide enough functionality to compute on
tensors and do control-dependent operations. For a complete guide, see the
:ref:`language-reference`.
    Scripting a dictionary or list copies the data inside it into a TorchScript instance that can be
subsequently passed by reference between Python and TorchScript with zero copy overhead.
``torch.jit.script`` can be used as a function for modules, functions, dictionaries and lists
and as a decorator ``@torch.jit.script`` for :ref:`torchscript-classes` and functions.
Args:
obj (Callable, class, or nn.Module): The ``nn.Module``, function, class type,
dictionary, or list to compile.
example_inputs (Union[List[Tuple], Dict[Callable, List[Tuple]], None]): Provide example inputs
to annotate the arguments for a function or ``nn.Module``.
Returns:
If ``obj`` is ``nn.Module``, ``script`` returns
a :class:`ScriptModule` object. The returned :class:`ScriptModule` will
have the same set of sub-modules and parameters as the
original ``nn.Module``. If ``obj`` is a standalone function,
a :class:`ScriptFunction` will be returned. If ``obj`` is a ``dict``, then
``script`` returns an instance of `torch._C.ScriptDict`. If ``obj`` is a ``list``,
then ``script`` returns an instance of `torch._C.ScriptList`.
**Scripting a function**
The ``@torch.jit.script`` decorator will construct a :class:`ScriptFunction`
by compiling the body of the function.
Example (scripting a function):
.. testcode::
import torch
@torch.jit.script
def foo(x, y):
if x.max() > y.max():
r = x
else:
r = y
return r
print(type(foo)) # torch.jit.ScriptFunction
# See the compiled graph as Python code
print(foo.code)
# Call the function using the TorchScript interpreter
foo(torch.ones(2, 2), torch.ones(2, 2))
.. testoutput::
:hide:
...
    **Scripting a function using example_inputs**
    Example inputs can be used to annotate the arguments of a function.
Example (annotating a function before scripting):
.. testcode::
import torch
def test_sum(a, b):
return a + b
# Annotate the arguments to be int
scripted_fn = torch.jit.script(test_sum, example_inputs=[(3, 4)])
print(type(scripted_fn)) # torch.jit.ScriptFunction
# See the compiled graph as Python code
print(scripted_fn.code)
# Call the function using the TorchScript interpreter
scripted_fn(20, 100)
.. testoutput::
:hide:
...
**Scripting an nn.Module**
Scripting an ``nn.Module`` by default will compile the ``forward`` method and recursively
compile any methods, submodules, and functions called by ``forward``. If a ``nn.Module`` only uses
features supported in TorchScript, no changes to the original module code should be necessary. ``script``
    will construct a :class:`ScriptModule` that has copies of the attributes, parameters, and methods of
the original module.
Example (scripting a simple module with a Parameter):
.. testcode::
import torch
class MyModule(torch.nn.Module):
def __init__(self, N, M):
super(MyModule, self).__init__()
# This parameter will be copied to the new ScriptModule
self.weight = torch.nn.Parameter(torch.rand(N, M))
# When this submodule is used, it will be compiled
self.linear = torch.nn.Linear(N, M)
def forward(self, input):
output = self.weight.mv(input)
# This calls the `forward` method of the `nn.Linear` module, which will
# cause the `self.linear` submodule to be compiled to a `ScriptModule` here
output = self.linear(output)
return output
scripted_module = torch.jit.script(MyModule(2, 3))
Example (scripting a module with traced submodules):
.. testcode::
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
# torch.jit.trace produces a ScriptModule's conv1 and conv2
self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
def forward(self, input):
input = F.relu(self.conv1(input))
input = F.relu(self.conv2(input))
return input
scripted_module = torch.jit.script(MyModule())
To compile a method other than ``forward`` (and recursively compile anything it calls), add
the :func:`@torch.jit.export <torch.jit.export>` decorator to the method. To opt out of compilation
use :func:`@torch.jit.ignore <torch.jit.ignore>` or :func:`@torch.jit.unused <torch.jit.unused>`.
Example (an exported and ignored method in a module)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
@torch.jit.export
def some_entry_point(self, input):
return input + 10
@torch.jit.ignore
def python_only_fn(self, input):
# This function won't be compiled, so any
# Python APIs can be used
import pdb
pdb.set_trace()
def forward(self, input):
if self.training:
self.python_only_fn(input)
return input * 99
scripted_module = torch.jit.script(MyModule())
print(scripted_module.some_entry_point(torch.randn(2, 2)))
print(scripted_module(torch.randn(2, 2)))
    Example (Annotating forward of nn.Module using example_inputs)::
import torch
import torch.nn as nn
from typing import NamedTuple
class MyModule(NamedTuple):
result: List[int]
class TestNNModule(torch.nn.Module):
def forward(self, a) -> MyModule:
result = MyModule(result=a)
return result
pdt_model = TestNNModule()
# Runs the pdt_model in eager model with the inputs provided and annotates the arguments of forward
scripted_model = torch.jit.script(pdt_model, example_inputs={pdt_model: [([10, 20, ], ), ], })
# Run the scripted_model with actual inputs
print(scripted_model([20]))
"""
global type_trace_db
if not _enabled:
return obj
if optimize is not None:
warnings.warn(
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
)
# No-op for modules, functions, class instances that are already scripted
if isinstance(obj, RecursiveScriptClass):
return obj
if isinstance(obj, ScriptModule):
return obj
if isinstance(obj, ScriptFunction):
return obj
if example_inputs:
# If MonkeyType is installed, enable profile directed type annotation
# Check if example_inputs are defined and generate call traces
# for the method by running eager mode version of the method with
# the provide example inputs. This logs all the traces in type_trace_db
type_trace_db = JitTypeTraceStore()
if monkeytype_trace:
monkeytype_config = JitTypeTraceConfig(type_trace_db)
with monkeytype_trace(monkeytype_config):
if isinstance(example_inputs, Dict):
# If the obj is an nn.Module or a class, then each method is
# executed with the arguments provided in the example inputs.
# example inputs here will be of type Dict(class.method, (arguments))
# This is used to infer type annotations for those methods
# which are not called directly under the hood of monkeytype.
for module, example_input in example_inputs.items():
for example in example_input:
module(*example)
elif isinstance(example_inputs, List):
for examples in example_inputs:
obj(*examples)
else:
raise ValueError("Error: Unable to infer types. Please format the inputs to type `List[Tuple]`"
" or `Dict[Callable, List[Tuple]]` to be run with MonkeyType.")
else:
warnings.warn("Warning: monkeytype is not installed. Please install https://github.com/Instagram/MonkeyType "
"to enable Profile-Directed Typing in TorchScript. Refer to "
"https://github.com/Instagram/MonkeyType/blob/master/README.rst to install MonkeyType. ")
if isinstance(obj, torch.nn.Module):
obj = call_prepare_scriptable_func(obj)
return torch.jit._recursive.create_script_module(
obj, torch.jit._recursive.infer_methods_to_compile
)
if isinstance(obj, dict):
return create_script_dict(obj)
if isinstance(obj, list):
return create_script_list(obj)
if inspect.isclass(obj):
qualified_name = _qualified_name(obj)
# If this type is a `nn.Module` subclass, they probably meant to pass
# an instance instead of a Module
if issubclass(obj, torch.nn.Module):
raise RuntimeError(
"Type '{}' cannot be compiled since it inherits"
" from nn.Module,"
" pass an instance instead".format(obj)
)
# Enums are automatically usable in TorchScript, explicitly scripting
# is not necessary, but not harmful either.
if issubclass(obj, enum.Enum):
return obj
if not _is_new_style_class(obj):
raise RuntimeError(
"TorchScript classes must be new-style classes. "
"Please inherit from 'object'."
)
if len(obj.mro()) > 2:
raise RuntimeError(
"TorchScript classes does not support inheritance yet. "
"Please directly inherit from 'object'."
)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromFrame(_frames_up + 1)
_compile_and_register_class(obj, _rcb, qualified_name)
return obj
elif inspect.isfunction(obj) or inspect.ismethod(obj):
qualified_name = _qualified_name(obj)
        # this is a decorated fn, and we need to get the underlying fn and its rcb
if hasattr(obj, "__script_if_tracing_wrapper"):
obj = obj.__original_fn # type: ignore[union-attr]
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
# some functions are explicitly marked as not supported in script mode
if hasattr(obj, "__script_unsupported"):
raise RuntimeError("TorchScript error: " + obj.__script_unsupported)
_check_directly_compile_overloaded(obj)
maybe_already_compiled_fn = _try_get_jit_cached_function(obj)
if maybe_already_compiled_fn:
return maybe_already_compiled_fn
ast = get_jit_def(obj, obj.__name__)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
fn = torch._C._jit_script_compile(
qualified_name, ast, _rcb, get_default_args(obj)
)
# Forward docstrings
fn.__doc__ = obj.__doc__
_set_jit_function_cache(obj, fn)
return fn
else:
return torch.jit._recursive.create_script_class(obj)
# overloads are registered in _jit_internal and compiled here so that _overload
# can be used in nn/functional.py without an import cycle
def _check_overload_defaults(impl_defaults, overload_defaults, loc):
for name, overload_value in overload_defaults.items():
if name not in impl_defaults or impl_defaults[name] != overload_value:
raise torch.jit.frontend.FrontendError(
loc,
"Default parameters on overloads do not affect the runtime so they "
"must equal to the default parameter on the implementation function. Found on "
"parameter {name}".format(name=name),
)
def _compile_function_with_overload(overload_fn, qual_name, impl_fn):
overload_decl = get_jit_def(overload_fn, overload_fn.__name__).decl()
overload_signature = torch.jit.annotations.get_signature(
overload_fn, None, None, inspect.ismethod(overload_fn)
)
impl_ast = get_jit_def(impl_fn, impl_fn.__name__)
overload_defaults = get_default_args(overload_fn)
implementation_defaults = get_default_args(impl_fn)
_rcb = _jit_internal.createResolutionCallbackFromClosure(impl_fn)
_check_overload_defaults(
implementation_defaults, overload_defaults, overload_decl.range()
)
fn = torch._C._jit_script_compile_overload(
qual_name,
overload_decl,
impl_ast,
_rcb,
implementation_defaults,
overload_signature,
)
return fn
def _get_overloads(obj):
# check for cached compiled fns
existing_compiled_fns = _try_get_jit_cached_overloads(obj)
qual_name = _qualified_name(obj)
uncompiled_overloads = _jit_internal._get_fn_overloads(qual_name)
if uncompiled_overloads is None:
return existing_compiled_fns
if obj in uncompiled_overloads:
raise RuntimeError(_jit_internal.get_overload_no_implementation_error_message(
'function', obj))
compiled_fns = []
for overload_fn in uncompiled_overloads:
compiled_fns.append(
_compile_function_with_overload(overload_fn, qual_name, obj)
)
if existing_compiled_fns:
compiled_fns = existing_compiled_fns + compiled_fns
# cache compilation, remove information stored to do compilation
_set_jit_overload_cache(obj, compiled_fns)
_jit_internal._clear_fn_overloads(qual_name)
return compiled_fns
def _check_directly_compile_overloaded(obj):
qual_name = _qualified_name(obj)
if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
raise RuntimeError(
"Function {} cannot be directly compiled because it"
" is overloaded. It must be used in a context of a function"
" where its inputs can determine which overload to call.".format(qual_name)
)
def interface(obj):
if not inspect.isclass(obj):
raise RuntimeError("interface must be applied to a class")
if not _is_new_style_class(obj):
raise RuntimeError("TorchScript interfaces must inherit from 'object'")
# Expected MRO is:
# User module
# torch.nn.modules.module.Module
# object
is_module_interface = issubclass(obj, torch.nn.Module) and len(obj.mro()) == 3
if not is_module_interface and len(obj.mro()) > 2:
raise RuntimeError(
"TorchScript interface does not support inheritance yet. "
"Please directly inherit from 'object' or 'nn.Module'."
)
qualified_name = _qualified_name(obj)
rcb = _jit_internal.createResolutionCallbackFromFrame(1)
# if this type is a `nn.Module` subclass, generate a module interface type
# instead of a class interface type; a module interface type only compiles
# the user provided methods as part of the interface
ast = get_jit_class_def(obj, obj.__name__)
mangled_classname = torch._C._jit_script_interface_compile(
qualified_name, ast, rcb, is_module_interface
)
obj.__torch_script_interface__ = mangled_classname
return obj
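# Usage sketch (illustrative; the interface name is hypothetical): a module
# interface only declares method signatures, and any ScriptModule matching
# those signatures can be assigned to an attribute of that interface type.
#   @torch.jit.interface
#   class ModuleInterface(torch.nn.Module):
#       def forward(self, inp: torch.Tensor) -> torch.Tensor:
#           pass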
def _recursive_compile_class(obj, loc):
_qual_name = _qualified_name(obj)
# We're starting a new compilation, so update the error call stack in
# case it fails
error_stack = torch._C.CallStack(_qual_name, loc)
rcb = _jit_internal.createResolutionCallbackForClassMethods(obj)
return _compile_and_register_class(obj, rcb, _qual_name)
CompilationUnit = torch._C.CompilationUnit
set_module(CompilationUnit, "torch.jit")
def pad(s: str, padding: int, offset: int = 0, char: str = ' '):
if padding >= len(s):
padding -= len(s)
return ''.join([char for _ in range(padding + offset)]) + s
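# Worked example (illustrative): pad("abc", 6) == "   abc" -- pad characters
# are prepended so the result is `padding` characters wide, and `offset` adds
# that many extra pad characters on top.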
class _ScriptProfileColumn:
def __init__(self, header: str, alignment: int = 4, offset: int = 0):
self.header = header
self.alignment = alignment
self.offset = offset
self.rows: Dict[int, Any] = {}
def add_row(self, lineno: int, value: Any):
self.rows[lineno] = value
def materialize(self):
max_length = len(self.header)
rows: List[Tuple[int, str]] = []
for (key, value) in self.rows.items():
cell = str(value)
rows.append((key, cell))
max_length = max(len(cell), max_length)
if self.alignment > 0:
padding = max_length + self.alignment
padding -= padding % self.alignment
else:
padding = 0
rows = [(key, pad(cell, padding, self.offset)) for key, cell in rows]
return pad(self.header, padding, self.offset), rows
class _ScriptProfileTable:
def __init__(self, cols: List[_ScriptProfileColumn], source_range: List[int]):
self.cols = cols
self.source_range = source_range
def dump_string(self):
outputs: List[str] = []
cells: List[Tuple[str, Dict[int, str]]] = []
header_buffer = ''
for col in self.cols:
header, rows = col.materialize()
header_buffer += header
cells.append((header, dict(rows)))
outputs.append(header_buffer)
outputs.append(pad('', len(header_buffer), 0, '='))
for line in self.source_range:
row_buffer = ''
for header, rows in cells:
cell = rows.get(line)
if cell is None:
row_buffer += pad('', len(header))
else:
row_buffer += cell
outputs.append(row_buffer)
return '\n'.join(outputs)
class _ScriptProfile:
def __init__(self):
self.profile = classes.profiling._ScriptProfile()
def enable(self):
self.profile.enable()
def disable(self):
self.profile.disable()
def dump_string(self) -> str:
outputs: List[str] = []
for source_stats in self.profile._dump_stats():
source_ref = source_stats.source()
source_lines = source_ref.text().splitlines()
dedent = min([len(line) - len(line.lstrip(' ')) for line in source_lines])
source_lines = [line[dedent:] for line in source_lines]
start_line = source_ref.starting_lineno()
end_line = start_line + len(source_lines)
source_range = range(start_line, end_line)
lineno = _ScriptProfileColumn("Line #")
hits = _ScriptProfileColumn("Hits")
time_ns = _ScriptProfileColumn("Time (ns)")
line_contents = _ScriptProfileColumn("Line Contents", 0, 1)
stats = source_stats.line_map()
for line in source_range:
lineno.add_row(line, line)
line_contents.add_row(line, source_lines[line - start_line])
stat = stats.get(line)
if stat is not None:
hits.add_row(line, stat.count())
time_ns.add_row(line, stat.duration_ns())
table = _ScriptProfileTable([lineno, hits, time_ns, line_contents], list(source_range))
outputs.append(table.dump_string())
return '\n\n'.join(outputs)
def dump(self):
print(self.dump_string())
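# Usage sketch (illustrative): collect and print per-line statistics for
# TorchScript code executed while the profile is enabled.
#   profiler = _ScriptProfile()
#   profiler.enable()
#   scripted_fn(example_input)      # hypothetical scripted function
#   profiler.disable()
#   print(profiler.dump_string())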
def _unwrap_optional(x):
assert x is not None, "Unwrapping null optional"
return x
_register_builtin(_unwrap_optional, "aten::_unwrap_optional")
_register_builtin(_jit_internal.is_scripting, "aten::is_scripting")
_register_builtin(has_torch_function, "aten::has_torch_function")
_register_builtin(has_torch_function_unary, "aten::has_torch_function")
_register_builtin(has_torch_function_variadic, "aten::has_torch_function")
| pytorch-master | torch/jit/_script.py |
from typing import List, Any, Optional, Union, Dict, Callable, Tuple
import math
number = Union[int, float]
# flake8: noqa
###
# There are generated files that depend on this file
# To re-generate, please run from the root of the repo:
# python torchgen/shape_functions/gen_jit_shape_functions.py
# How to test:
# After regenerating files, compile PyTorch.
# Then run: ./build/bin/test_jit --gtest_filter=TestShapeGraphLinting.Basic
# If you have enabled opinfo testing for the op, also run:
# python test/test_ops_jit.py TestJitCPU::test_variant_consistency_jit_[FAILING_OP]_cpu_float32
# to reproduce errors from opinfo tests.
# Example PR: https://github.com/pytorch/pytorch/pull/80860/files
####
import torch
def broadcast(a: List[int], b: List[int]):
dimsA = len(a)
dimsB = len(b)
ndim = max(dimsA, dimsB)
expandedSizes: List[int] = []
for i in range(ndim):
offset = ndim - 1 - i
dimA = dimsA - 1 - offset
dimB = dimsB - 1 - offset
sizeA = a[dimA] if (dimA >= 0) else 1
sizeB = b[dimB] if (dimB >= 0) else 1
if sizeA != sizeB and sizeA != 1 and sizeB != 1:
# TODO: only assertion error is bound in C++ compilation right now
raise AssertionError(
"The size of tensor a {} must match the size of tensor b ("
"{}) at non-singleton dimension {}".format(sizeA, sizeB, i)
)
expandedSizes.append(sizeB if sizeA == 1 else sizeA)
return expandedSizes
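# Illustrative examples of the broadcasting rule implemented above:
# broadcast([2, 1, 4], [3, 1]) -> [2, 3, 4]   (missing leading dims count as 1; size-1 dims expand)
# broadcast([5, 4], [1])       -> [5, 4]
# broadcast([3], [4])          -> AssertionError (3 != 4 and neither is 1)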
def broadcast_three(a: List[int], b: List[int], c: List[int]):
return broadcast(broadcast(a, b), c)
def broadcast_one_three(a: List[int], b: Any, c: List[int]):
return broadcast(a, c)
def adaptive_avg_pool2d(self: List[int], out: List[int]):
assert len(out) == 2
assert len(self) == 3 or len(self) == 4
for i in range(1, len(self)):
assert self[i] != 0
shape: List[int] = []
for i in range(0, len(self) - 2):
shape.append(self[i])
for elem in out:
shape.append(elem)
return shape
def _copy(self: List[int]):
out: List[int] = []
for elem in self:
out.append(elem)
return out
def unary(self: List[int]):
return _copy(self)
def broadcast_inplace(a: List[int], b: List[int]):
dimsA = len(a)
dimsB = len(b)
if dimsB > dimsA:
raise AssertionError(
"The dims of tensor b ({}) must be less than or equal to"
"the dims of tensor a ({}) ".format(dimsB, dimsA)
)
for dimA in range(dimsA):
dimB = dimsB - dimsA + dimA
sizeA = a[dimA]
sizeB = b[dimB] if (dimB >= 0) else 1
if sizeA != sizeB and sizeB != 1:
# TODO: only assertion error is bound in C++ compilation right now
raise AssertionError(
"The size of tensor a {} must match the size of tensor b ("
"{}) at non-singleton dimension {}".format(sizeA, sizeB, dimA)
)
return _copy(a)
def expand(self: List[int], sizes: List[int]):
assert len(sizes) >= len(self)
ndim = len(sizes)
tensor_dim = len(self)
if ndim == 0:
return _copy(sizes)
out: List[int] = []
for i in range(ndim):
offset = ndim - 1 - i
dim = tensor_dim - 1 - offset
size = self[dim] if dim >= 0 else 1
targetSize = sizes[i]
if targetSize == -1:
assert dim >= 0
targetSize = size
if size != targetSize:
assert size == 1
size = targetSize
out.append(size)
return out
def expand_one_unused(self: List[int], sizes: List[int], inp0: Any):
return expand(self, sizes)
def infer_size_impl(shape: List[int], numel: int) -> List[int]:
newsize = 1
infer_dim: Optional[int] = None
for dim in range(len(shape)):
if shape[dim] == -1:
if infer_dim is not None:
raise AssertionError("only one dimension can be inferred")
infer_dim = dim
elif shape[dim] >= 0:
newsize *= shape[dim]
else:
raise AssertionError("invalid shape dimensions")
if not (
numel == newsize
or (infer_dim is not None and newsize > 0 and numel % newsize == 0)
):
raise AssertionError("invalid shape")
out = _copy(shape)
if infer_dim is not None:
out[infer_dim] = numel // newsize
return out
def numel(sizes: List[int]):
numel = 1
for elem in sizes:
numel *= elem
return numel
def view(self: List[int], sizes: List[int]):
return infer_size_impl(sizes, numel(self))
def view_one_unused(self: List[int], sizes: List[int], *, implicit: bool = False):
return view(self, sizes)
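# Illustrative examples:
# view([2, 3, 4], [6, -1]) -> [6, 4]          (the -1 entry is inferred from the element count)
# view([2, 3, 4], [5, 3])  -> AssertionError  (24 elements cannot be viewed as 5 x 3)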
def sum_mean_dim(self: List[int], opt_dims: Optional[List[int]], keep_dim: bool, dt: Any):
out: List[int] = []
if opt_dims is None:
dims: List[int] = []
else:
dims = opt_dims
for idx in range(len(self)):
is_mean_dim: bool = False
for reduce_dim in dims:
if idx == maybe_wrap_dim(reduce_dim, len(self)):
is_mean_dim = True
if is_mean_dim:
if keep_dim:
out.append(1)
else:
out.append(self[idx])
return out
def max_dim(self: List[int], dim: int, keep_dim: bool):
out = sum_mean_dim(self, [dim], keep_dim, None)
return out, out
# note: python already rounds down towards negative infinity on integer division, special arithmetic not needed
def div_rtn(x: int, y: int):
return x // y
def pooling_output_shape_pad_lr(
inputSize: int,
kernelSize: int,
pad_l: int,
pad_r: int,
stride: int,
dilation: int,
ceil_mode: bool,
):
outputSize = (
div_rtn(
inputSize
+ pad_l
+ pad_r
- dilation * (kernelSize - 1)
- 1
+ (stride - 1 if ceil_mode else 0),
stride,
)
+ 1
)
if ceil_mode:
if (outputSize - 1) * stride >= inputSize + pad_l:
outputSize = outputSize - 1
return outputSize
def pooling_output_shape(
inputSize: int,
kernelSize: int,
pad_l: int,
stride: int,
dilation: int,
ceil_mode: bool,
):
assert stride != 0, "stride should not be zeero"
return pooling_output_shape_pad_lr(
inputSize, kernelSize, pad_l, pad_l, stride, dilation, ceil_mode
)
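# Illustrative example: an 8-element dimension, kernel 3, no padding, stride 2, dilation 1:
# pooling_output_shape(8, 3, 0, 2, 1, False) -> 3    (floor((8 - 3) / 2) + 1)
# pooling_output_shape(8, 3, 0, 2, 1, True)  -> 4    (ceil_mode also counts the final partial window)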
def pool2d_shape_check(
input: List[int],
kH: int,
kW: int,
dH: int,
dW: int,
padH: int,
padW: int,
dilationH: int,
dilationW: int,
nInputPlane: int,
inputHeight: int,
inputWidth: int,
outputHeight: int,
outputWidth: int,
):
ndim = len(input)
nOutputPlane = nInputPlane
assert kW > 0 and kH > 0
assert dW > 0 and dH > 0
assert dilationH > 0 and dilationW > 0
valid_dims = input[1] != 0 and input[2] != 0
assert (
ndim == 3
and input[0] != 0
and valid_dims
or (ndim == 4 and valid_dims and input[3] != 0)
)
assert kW // 2 >= padW and kH // 2 >= padH
assert outputWidth >= 1 and outputHeight >= 1
def max_pool2d(
input: List[int],
kernel_size: List[int],
stride: List[int],
padding: List[int],
dilation: List[int],
ceil_mode: bool,
):
assert (
len(kernel_size) == 1 or len(kernel_size) == 2
), "max_pool2d: kernel_size must either be a single int, or a tuple of two ints"
kH = kernel_size[0]
kW = kH if len(kernel_size) == 1 else kernel_size[1]
assert (
len(stride) == 0 or len(stride) == 1 or len(stride) == 2
), "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints"
dH = kH if len(stride) == 0 else stride[0]
if len(stride) == 0:
dW = kW
elif len(stride) == 1:
dW = dH
else:
dW = stride[1]
assert (
len(padding) == 1 or len(padding) == 2
), "max_pool2d: padding must be either be a single int, or a tuple of two ints"
padH = padding[0]
padW = padH if len(padding) == 1 else padding[1]
assert (
len(dilation) == 1 or len(dilation) == 2
), "max_pool2d: dilation must be either a single int, or a tuple of two ints"
dilationH = dilation[0]
dilationW = dilationH if len(dilation) == 1 else dilation[1]
assert len(input) == 3 or len(input) == 4
nbatch = input[-4] if len(input) == 4 else 1
nInputPlane = input[-3]
inputHeight = input[-2]
inputWidth = input[-1]
outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)
pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
dilationH,
dilationW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
)
if len(input) == 3:
return [nInputPlane, outputHeight, outputWidth]
else:
return [nbatch, nInputPlane, outputHeight, outputWidth]
def max_pool2d_with_indices(
input: List[int],
kernel_size: List[int],
stride: List[int],
padding: List[int],
dilation: List[int],
ceil_mode: bool,
):
out = max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
return (out, out)
def upsample_nearest2d(
input: List[int],
output_size: Optional[List[int]],
scale_factors: Optional[List[float]],
):
out: List[int] = []
out.append(input[0])
out.append(input[1])
if output_size is not None:
assert (
scale_factors is None
), "Must specify exactly one of output_size and scale_factors"
assert len(output_size) == 2
out.append(output_size[0])
out.append(output_size[1])
return out
if scale_factors is not None:
assert (
output_size is None
), "Must specify exactly one of output_size and scale_factors"
assert len(scale_factors) == 2
out.append(int(input[2] * scale_factors[0]))
out.append(int(input[3] * scale_factors[1]))
return out
assert 0, "Either output_size or scale_factors must be presented"
def mm(self: List[int], mat2: List[int]):
assert len(self) == 2, "self must be a matrix"
assert len(mat2) == 2, "mat2 must be a matrix"
assert self[1] == mat2[0]
return [self[0], mat2[1]]
def dot(self: List[int], tensor: List[int]):
assert len(self) == 1 and len(tensor) == 1
assert self[0] == tensor[0]
out: List[int] = []
return out
def mv(self: List[int], vec: List[int]):
assert len(self) == 2 and len(vec) == 1
assert self[1] == vec[0]
# TODO: return self
return [self[0]]
def unsqueeze(li: List[int], dim: int):
dim = maybe_wrap_dim(dim, len(li) + 1)
out = _copy(li)
out.insert(dim, 1)
return out
def squeeze_nodim(li: List[int]):
out: List[int] = []
for i in range(len(li)):
if li[i] != 1:
out.append(li[i])
return out
def squeeze(li: List[int], dim: int):
out: List[int] = []
wrapped_dim = maybe_wrap_dim(dim, len(li))
for i in range(len(li)):
if i == wrapped_dim:
if li[i] != 1:
out.append(li[i])
else:
out.append(li[i])
return out
def index_select(self: List[int], dim: int, index: List[int]):
dim = maybe_wrap_dim(dim, len(self))
numel = multiply_integers(index)
assert len(index) <= 1
assert dim == 0 or dim < len(self)
result_size: List[int] = []
for i in range(len(self)):
if dim == i:
result_size.append(numel)
else:
result_size.append(self[i])
return result_size
def embedding(
weight: List[int],
indices: List[int],
padding_idx: int = -1,
scale_grad_by_freq: bool = False,
sparse: bool = False,
):
assert len(weight) == 2
if len(indices) == 1:
return index_select(weight, 0, indices)
size = _copy(indices)
size.append(weight[1])
return size
def max_int():
return 9223372036854775807
def slice(
self: List[int], dim: int, start: Optional[int], end: Optional[int], step: int
):
ndim = len(self)
assert ndim != 0
dim = maybe_wrap_dim(dim, ndim)
start_val = start if start is not None else 0
end_val = end if end is not None else max_int()
assert step > 0
if start_val == max_int():
start_val = 0
if start_val < 0:
start_val += self[dim]
if end_val < 0:
end_val += self[dim]
if start_val < 0:
start_val = 0
elif start_val > self[dim]:
start_val = self[dim]
if end_val < start_val:
end_val = start_val
elif end_val >= self[dim]:
end_val = self[dim]
slice_len = end_val - start_val
out = _copy(self)
out[dim] = (slice_len + step - 1) // step
return out
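# Illustrative example: out[dim] is the number of indices hit by start:end:step.
# slice([10, 4], 0, 2, 8, 2) -> [3, 4]    (indices 2, 4, 6 of the first dimension)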
def check_cat_no_zero_dim(tensors: List[List[int]]):
for tensor in tensors:
assert len(tensor) > 0
def legacy_cat_wrap_dim(dim: int, tensor_sizes: List[List[int]]):
out_dim: Optional[int] = None
for size in tensor_sizes:
if not (len(size) == 1 and size[0] == 0):
if out_dim is None:
out_dim = maybe_wrap_dim(dim, len(size))
if out_dim is None:
out_dim = dim
return out_dim
def should_skip(tensor: List[int]):
return numel(tensor) == 0 and len(tensor) == 1
def check_cat_shape_except_dim(
first: List[int], second: List[int], dimension: int, index: int
):
first_dims = len(first)
second_dims = len(second)
assert first_dims == second_dims, "Tensors must have same number of dimensions"
for dim in range(0, first_dims):
if dim != dimension:
assert (
first[dim] == second[dim]
), "Sizes of tensors must match except in dimension"
def cat(tensors: List[List[int]], dim: int):
check_cat_no_zero_dim(tensors)
dim = legacy_cat_wrap_dim(dim, tensors)
assert len(tensors) > 0
not_skipped_tensor: Optional[List[int]] = None
for tensor in tensors:
if not should_skip(tensor):
not_skipped_tensor = tensor
if not_skipped_tensor is None:
return [0]
cat_dim_size = 0
for i in range(len(tensors)):
tensor = tensors[i]
if not should_skip(tensor):
check_cat_shape_except_dim(not_skipped_tensor, tensor, dim, i)
cat_dim_size = cat_dim_size + tensor[dim]
result_size = _copy(not_skipped_tensor)
result_size[dim] = cat_dim_size
return result_size
def select(self: List[int], dim: int, index: int):
ndim = len(self)
assert ndim != 0
dim = maybe_wrap_dim(dim, ndim)
size = self[dim]
assert not (index < -size or index >= size)
if index < 0:
index += size
out: List[int] = []
for i in range(ndim):
if i != dim:
out.append(self[i])
return out
def matmul(tensor1: List[int], tensor2: List[int]):
dim_tensor1 = len(tensor1)
dim_tensor2 = len(tensor2)
if dim_tensor1 == 1 and dim_tensor2 == 1:
return dot(tensor1, tensor2)
elif dim_tensor1 == 2 and dim_tensor2 == 1:
return mv(tensor1, tensor2)
elif dim_tensor1 == 1 and dim_tensor2 == 2:
return squeeze(mm(unsqueeze(tensor1, 0), tensor2), 0)
elif dim_tensor1 == 2 and dim_tensor2 == 2:
return mm(tensor1, tensor2)
elif dim_tensor1 >= 1 and dim_tensor2 >= 1:
        # We are multiplying b1 x n x m1 by b2 x m2 x p (where b1 can be a list);
# we track m1 vs m2 separately even though they must match for nicer error messages
n = tensor1[-2] if dim_tensor1 > 1 else 1
m1 = tensor1[-1]
batch_tensor1: List[int] = []
# TODO: handling of slice
for i in range(dim_tensor1 - 2):
batch_tensor1.append(tensor1[i])
        m2 = tensor2[-2] if dim_tensor2 > 1 else 1
p = tensor2[-1]
batch_tensor2: List[int] = []
# TODO: handling of slice
for i in range(dim_tensor2 - 2):
batch_tensor2.append(tensor2[i])
# expand the batch portion (i.e. cut off matrix dimensions and expand rest)
expand_batch_portion = broadcast(batch_tensor1, batch_tensor2)
# todo: copy ?
output_shape = expand_batch_portion
if dim_tensor1 > 1:
output_shape.append(n)
if dim_tensor2 > 1:
output_shape.append(p)
return output_shape
else:
assert False, "both arguments to matmul need to be at least 1D"
def t(self: List[int]):
assert len(self) <= 2
self_len = len(self)
if self_len == 0:
out: List[int] = []
return out
elif self_len == 1:
return [self[0]]
else:
return [self[1], self[0]]
def transpose(self: List[int], dim0: int, dim1: int):
ndims = len(self)
dim0 = maybe_wrap_dim(dim0, ndims)
dim1 = maybe_wrap_dim(dim1, ndims)
if dim0 == dim1:
return _copy(self)
out: List[int] = []
for i in range(ndims):
if i == dim0:
out.append(self[dim1])
elif i == dim1:
out.append(self[dim0])
else:
out.append(self[i])
return out
def linear(input: List[int], weight: List[int], bias: Optional[List[int]]):
out = matmul(input, t(weight))
if bias is not None:
assert broadcast(bias, out) == out
return out
def addmm(self: List[int], mat1: List[int], mat2: List[int], beta: Any, alpha: Any):
return broadcast(self, mm(mat1, mat2))
def check_non_negative(array: List[int]) -> bool:
    # Despite its name, this returns True when *any* element is negative;
    # callers use it as `assert not check_non_negative(...)`.
    # TODO: look into rewriting with early return and getting loop unrolling to fire
    has_negative = False
    for val in array:
        if val < 0:
            has_negative = True
    return has_negative
def check_shape_forward(
input: List[int],
weight_sizes: List[int],
bias: Optional[List[int]],
stride: List[int],
padding: List[int],
dilation: List[int],
groups: int,
):
k = len(input)
weight_dim = len(weight_sizes)
# TODO: assertions could be expanded with the error messages
assert not check_non_negative(padding)
assert not check_non_negative(stride)
assert weight_dim == k
assert weight_sizes[0] >= groups
assert (weight_sizes[0] % groups) == 0
# only handling not transposed
assert input[1] == weight_sizes[1] * groups
assert bias is None or (len(bias) == 1 and bias[0] == weight_sizes[0])
for i in range(2, k):
assert (input[i] + 2 * padding[i - 2]) >= (
dilation[i - 2] * (weight_sizes[i] - 1) + 1
)
# this is not handling transposed convolution yet
def conv_output_size(
input_size: List[int],
weight_size: List[int],
bias: Optional[List[int]],
stride: List[int],
padding: List[int],
dilation: List[int],
groups: int,
):
check_shape_forward(
input_size, weight_size, bias, stride, padding, dilation, groups
)
has_dilation = len(dilation) > 0
dim = len(input_size)
output_size: List[int] = []
input_batch_size_dim = 0
weight_output_channels_dim = 0
output_size.append(input_size[input_batch_size_dim])
output_size.append(weight_size[weight_output_channels_dim])
for d in range(2, dim):
dilation_ = dilation[d - 2] if has_dilation else 1
kernel = dilation_ * (weight_size[d] - 1) + 1
output_size.append(
(input_size[d] + (2 * padding[d - 2]) - kernel) // stride[d - 2] + 1
)
return output_size
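# Illustrative example: a 3x3 convolution with stride 1 and padding 1 preserves H and W:
# conv_output_size([1, 3, 32, 32], [16, 3, 3, 3], None, [1, 1], [1, 1], [1, 1], 1)
#     -> [1, 16, 32, 32]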
def conv1d(
input: List[int],
weight: List[int],
bias: Optional[List[int]],
stride: List[int],
padding: List[int],
dilation: List[int],
groups: int,
):
assert len(weight) == 3
assert len(input) == 3
return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
def conv2d(
input: List[int],
weight: List[int],
bias: Optional[List[int]],
stride: List[int],
padding: List[int],
dilation: List[int],
groups: int,
):
assert len(weight) == 4
assert len(input) == 4
return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
def conv_backwards(grad_output: List[int], input:List[int], weight:List[int], biases:Optional[List[int]]):
    # The bias gradient is always generated regardless of whether biases is supplied
return _copy(input), _copy(weight), [grad_output[1]]
def conv_transpose2d_input(input: List[int], weight: List[int], bias: Optional[List[int]] = None, stride: Optional[List[int]] = None, padding: Optional[List[int]] = None, output_padding: Optional[List[int]] = None, groups: int = 1, dilation: Optional[List[int]] = None) -> List[int]:
if stride is None:
stride = [1, 1]
if padding is None:
padding = [0, 0]
if output_padding is None:
output_padding = [0, 0]
if dilation is None:
dilation = [1, 1]
has_dilation = len(dilation) > 0
dim = len(input)
output_size: List[int] = []
input_batch_size_dim = 0
weight_output_channels_dim = 0
output_size.append(input[input_batch_size_dim])
output_size.append(weight[weight_output_channels_dim])
for d in range(2, dim):
dilation_ = dilation[d - 2] if has_dilation else 1
kernel = dilation_ * (weight[d] - 1)
output_size.append((input[d] - 1) * stride[d - 2] - 2 * padding[d - 2] + kernel + 1)
return output_size
def conv_forwards(input: List[int], weight: List[int], bias: Optional[List[int]], stride: List[int], padding: List[int], dilation: List[int], transposed: bool, output_padding: List[int], groups: int) -> List[int]:
has_dilation = len(dilation) > 0
dim = len(input)
output_size: List[int] = []
input_batch_size_dim = 0
weight_output_channels_dim = 0
output_size.append(input[input_batch_size_dim])
output_size.append(weight[weight_output_channels_dim])
for d in range(2, dim):
dilation_ = dilation[d - 2] if has_dilation else 1
if transposed:
kernel = dilation_ * (weight[d] - 1)
output_size.append((input[d] - 1) * stride[d - 2] - 2 * padding[d - 2] + kernel + 1)
else:
kernel = dilation_ * (weight[d] - 1) + 1
output_size.append((input[d] + (2 * padding[d - 2]) - kernel) // stride[d - 2] + 1)
return output_size
def batch_norm(
input: List[int],
weight: Optional[List[int]],
bias: Optional[List[int]],
running_mean: Optional[List[int]],
running_var: Optional[List[int]],
training: bool,
momentum: float,
eps: float,
cudnn_enabled: bool,
):
out: List[int] = []
for elem in input:
out.append(elem)
return out
def conv3d(
input: List[int],
weight: List[int],
bias: Optional[List[int]],
stride: List[int],
padding: List[int],
dilation: List[int],
groups: int,
):
assert len(weight) == 5
assert len(input) == 5
return conv_output_size(input, weight, bias, stride, padding, dilation, groups)
def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
if dim_post_expr <= 0:
assert wrap_scalar
dim_post_expr = 1
min = -dim_post_expr
max = dim_post_expr - 1
assert not (dim < min or dim > max)
if dim < 0:
dim += dim_post_expr
return dim
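# Illustrative examples:
# maybe_wrap_dim(-1, 4) -> 3    (negative dims count from the end)
# maybe_wrap_dim(2, 4)  -> 2
# maybe_wrap_dim(0, 0)  -> 0    (a scalar is treated as 1-dimensional when wrap_scalar=True)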
def zero_dim_tensor(input: Any):
out: List[int] = []
return out
def multiply_integers(li: List[int]):
out = 1
for elem in li:
out = out * elem
return out
def arange_end(end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any):
assert end >= 0
return [int(math.ceil(end))]
def arange_start(
start: number, end: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any
):
assert end >= 0
assert end >= start
return [int(math.ceil(end - start))]
def arange_start_step(
start: number, end: number, step: number, inp0: Any, inp1: Any, inp2: Any, inp3: Any
):
assert step != 0
if step < 0:
assert start >= end
else:
assert end >= start
return [int(math.ceil((end - start) / step))]
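# Illustrative examples (the inp* arguments stand in for the unused dtype/layout/device/pin_memory slots):
# arange_end(5.0, None, None, None, None)             -> [5]
# arange_start(2, 7, None, None, None, None)          -> [5]
# arange_start_step(0, 10, 3, None, None, None, None) -> [4]    (values 0, 3, 6, 9)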
def permute(input: List[int], dims: List[int]):
assert len(input) == len(dims)
ndim = len(dims)
seen_dims: List[int] = []
newSizes: List[int] = []
for i in range(ndim):
dim = maybe_wrap_dim(dims[i], ndim)
seen_dims.append(dim)
newSizes.append(input[dim])
for i in range(1, ndim):
for j in range(i):
assert seen_dims[i] != seen_dims[j]
return newSizes
def flatten(input: List[int], start_dim: int, end_dim: int):
start_dim = maybe_wrap_dim(start_dim, len(input))
end_dim = maybe_wrap_dim(end_dim, len(input))
assert start_dim <= end_dim
if len(input) == 0:
return [1]
if start_dim == end_dim:
# TODO: return self
out: List[int] = []
for elem in input:
out.append(elem)
return out
slice_numel = 1
for i in range(start_dim, end_dim + 1):
slice_numel *= input[i]
# TODO: use slicing when slice optimization has landed
# slice_numel = multiply_integers(input[start_dim:end_dim - start_dim + 1])
shape: List[int] = []
for i in range(start_dim):
shape.append(input[i])
shape.append(slice_numel)
for i in range(end_dim + 1, len(input)):
shape.append(input[i])
return shape
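# Illustrative examples:
# flatten([2, 3, 4], 1, 2)  -> [2, 12]
# flatten([2, 3, 4], 0, -1) -> [24]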
def nonzero_lower_bound(input: List[int]):
return [0, len(input)]
def nonzero_upper_bound(input: List[int]):
return [numel(input), len(input)]
def _reduce_along_dim(self: List[int], dim: int, keepdim: bool):
dim = maybe_wrap_dim(dim, len(self))
out: List[int] = []
for i, self_dim in enumerate(self):
if i == dim:
if keepdim:
out.append(1)
else:
out.append(self_dim)
return out
def argmax(self: List[int], dim: Optional[int] = None, keepdim: bool = False) -> List[int]:
if dim is None:
return []
return _reduce_along_dim(self, dim, keepdim)
def bmm(self: List[int], mat2: List[int]) -> List[int]:
assert len(self) == 3, "bmm only supports 3D tensors"
assert len(mat2) == 3, "bmm only supports 3D tensors"
assert self[0] == mat2[0], "mismatching batch dimension"
assert self[2] == mat2[1], "mismatching contracting dimension"
return [self[0], self[1], mat2[2]]
def _shape_as_tensor(self: List[int]) -> List[int]:
return [len(self)]
def topk(self: List[int], k: int, dim: int = -1) -> Tuple[List[int], List[int]]:
if len(self) == 0:
result: List[int] = []
else:
assert k <= self[dim], f"k ({k}) is too big for dimension {dim} of size {self[dim]}"
result = _copy(self)
result[dim] = k
return result, result
def nll_loss_forward(self: List[int], target: List[int], weight: Optional[List[int]], reduction: int) -> Tuple[List[int], List[int]]:
# This is taken shamelessly from the meta function in LossNLL.cpp
self_dim = len(self)
target_dim = len(target)
assert 0 < self_dim <= 2
assert target_dim <= 1
no_batch_dim = self_dim == 1 and target_dim == 0
assert no_batch_dim or (self[0] == target[0])
n_classes = self[-1]
scalar_shape: List[int] = []
assert weight is None or (len(weight) == 1 and weight[0] == n_classes)
if reduction == 0 and self_dim == 2:
reduction_shape = [self[0]]
else:
reduction_shape = scalar_shape
return reduction_shape, scalar_shape
def native_layer_norm(input: List[int], normalized_shape: List[int]) -> Tuple[List[int], List[int], List[int]]:
reduction_shape: List[int] = []
num_unreduced_dimensions = len(input) - len(normalized_shape)
assert num_unreduced_dimensions >= 0
for i in range(num_unreduced_dimensions):
reduction_shape.append(input[i])
for i in range(num_unreduced_dimensions, len(input)):
reduction_shape.append(1)
return _copy(input), reduction_shape, reduction_shape
def native_batch_norm(input: List[int], weight: Optional[List[int]], bias: Optional[List[int]], running_mean: Optional[List[int]], running_var: Optional[List[int]], training: bool) -> Tuple[List[int], List[int], List[int]]:
if training:
_size = [input[1]]
else:
_size = [0]
return _copy(input), _size, _size
# TODO: Add support for List[Optional[List[int]]] arguments (i.e. `Tensor?[]`).
# def index_Tensor(self: List[int], indices: List[Optional[List[int]]]) -> List[int]:
# assert len(indices) <= len(self), "More indices than dimensions to index"
# broadcasted_shape: List[int] = []
# for index_tensor_shape in indices:
# if index_tensor_shape is not None:
# broadcasted_shape = broadcast(broadcasted_shape, index_tensor_shape)
# return broadcasted_shape
ScriptFn = torch._C.ScriptFunction
shape_compute_graph_mapping: Dict[str, ScriptFn] = {}
bounded_compute_graph_mapping: Dict[str, Tuple[ScriptFn, ScriptFn]] = {}
script_func_map: Dict[Callable, ScriptFn] = {}
def process_func(func: Callable):
if func not in script_func_map:
scripted_func = torch.jit.script(func)
torch._C._jit_pass_inline(scripted_func.graph)
for _ in range(2):
torch._C._jit_pass_peephole(scripted_func.graph)
torch._C._jit_pass_constant_propagation(scripted_func.graph)
script_func_map[func] = scripted_func
return script_func_map[func]
def add_shape_compute_mapping(operator_schema: str, func: Callable):
global shape_compute_graph_mapping
shape_compute_graph_mapping[operator_schema] = process_func(func)
def add_bounded_compute_mapping(operator_schema: str, lower_bound_func: Callable, upper_bound_func: Callable):
# Adds a shape compute function for both upper and lower bounds
fns = (process_func(lower_bound_func), process_func(upper_bound_func))
bounded_compute_graph_mapping[operator_schema] = fns
add_shape_compute_mapping("aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)", unary)
add_shape_compute_mapping("aten::rsub.Tensor(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", unary)
add_shape_compute_mapping("aten::dropout(Tensor input, float p, bool train) -> Tensor", unary)
add_shape_compute_mapping("aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor", adaptive_avg_pool2d)
add_shape_compute_mapping("prim::NumToTensor.Scalar(Scalar a) -> Tensor", zero_dim_tensor)
add_shape_compute_mapping("prim::NumToTensor.bool(bool a) -> Tensor", zero_dim_tensor)
add_shape_compute_mapping("aten::zeros(int[] size, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)", unary)
add_shape_compute_mapping("aten::to.dtype(Tensor(a) self, int dtype, bool non_blocking=False, bool copy=False, int? memory_format=None) -> (Tensor(a))", unary)
add_shape_compute_mapping("aten::arange(Scalar end, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)", arange_end)
add_shape_compute_mapping("aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", arange_start)
add_shape_compute_mapping("aten::arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", arange_start_step)
add_shape_compute_mapping("aten::squeeze(Tensor(a) self) -> Tensor(a)", squeeze_nodim)
add_shape_compute_mapping("aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)", squeeze)
add_shape_compute_mapping("aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)", unsqueeze)
add_shape_compute_mapping("aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor(a)", slice)
add_shape_compute_mapping("aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a)", select)
add_shape_compute_mapping("aten::index_select(Tensor self, int dim, Tensor index) -> Tensor", index_select)
add_shape_compute_mapping("aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, "
"float eps=1e-05, bool cudnn_enable=True) -> Tensor", unary)
add_shape_compute_mapping("aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", unary)
add_shape_compute_mapping("aten::_no_grad_embedding_renorm_(Tensor weight, Tensor input, float max_norm, float norm_type) -> Tensor", unary)
add_shape_compute_mapping("aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)", unary)
add_shape_compute_mapping("aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor", embedding)
add_shape_compute_mapping("aten::mm(Tensor self, Tensor mat2) -> Tensor", mm)
add_shape_compute_mapping("aten::dot(Tensor self, Tensor tensor) -> Tensor", dot)
add_shape_compute_mapping("aten::mv(Tensor self, Tensor vec) -> Tensor", mv)
add_shape_compute_mapping("aten::matmul(Tensor self, Tensor other) -> Tensor", matmul)
add_shape_compute_mapping("aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", linear)
add_shape_compute_mapping("aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", max_pool2d)
add_shape_compute_mapping("aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", max_pool2d_with_indices)
add_shape_compute_mapping("aten::t(Tensor(a) self) -> Tensor(a)", t)
add_shape_compute_mapping("aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", transpose)
add_shape_compute_mapping("aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor", conv1d)
add_shape_compute_mapping("aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor", conv2d)
add_shape_compute_mapping("aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor", batch_norm)
add_shape_compute_mapping("aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor", conv3d)
add_shape_compute_mapping("aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, int[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", conv_backwards)
add_shape_compute_mapping("aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor", conv_forwards)
add_shape_compute_mapping("aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor", conv_transpose2d_input)
add_shape_compute_mapping("aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)", flatten)
add_shape_compute_mapping("aten::cat(Tensor[] tensors, int dim=0) -> Tensor", cat)
add_shape_compute_mapping("aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)", permute)
add_shape_compute_mapping("aten::view(Tensor(a) self, int[] size) -> Tensor(a)", view)
add_shape_compute_mapping("aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)", expand)
add_shape_compute_mapping("aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)", expand_one_unused)
add_shape_compute_mapping("aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", sum_mean_dim)
add_shape_compute_mapping("aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", sum_mean_dim)
add_shape_compute_mapping("aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", max_dim)
add_shape_compute_mapping("aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor", zero_dim_tensor)
add_shape_compute_mapping("aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", zero_dim_tensor)
add_shape_compute_mapping("aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", addmm)
add_shape_compute_mapping("aten::upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> (Tensor)", upsample_nearest2d)
add_shape_compute_mapping("aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor", unary)
add_shape_compute_mapping("aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor", unary)
add_shape_compute_mapping("aten::dequantize(Tensor self) -> Tensor", unary)
add_shape_compute_mapping("quantized::add(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc", broadcast)
add_shape_compute_mapping("aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", argmax)
add_shape_compute_mapping("aten::bmm(Tensor self, Tensor mat2) -> Tensor", bmm)
add_shape_compute_mapping("aten::_shape_as_tensor(Tensor self) -> Tensor", _shape_as_tensor)
add_shape_compute_mapping("aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)", topk)
add_shape_compute_mapping("aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)", nll_loss_forward)
add_shape_compute_mapping("aten::native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)", native_layer_norm)
add_shape_compute_mapping("aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", native_batch_norm)
# TODO: Add support for List[Optional[List[int]]] arguments (i.e. `Tensor?[]`).
#add_shape_compute_mapping("aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", index_Tensor)
# TODO: migrate over all of symbolic_shape_registry_util.cpp
# These are duplicated here so that the functions will be serialized
add_shape_compute_mapping("aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor", broadcast_three)
add_shape_compute_mapping("aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor", broadcast_one_three)
add_shape_compute_mapping("aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", broadcast_inplace)
# quantized_conv_prepack TODO
# Shape Compute Fn with upper and lower bounds
add_bounded_compute_mapping("aten::nonzero(Tensor self) -> (Tensor)", nonzero_lower_bound, nonzero_upper_bound)
| pytorch-master | torch/jit/_shape_functions.py |
import torch
from torch._ops import OpOverload, OpOverloadPacket
def _register_decomposition(op: OpOverload, graph: torch._C.Graph):
assert not isinstance(op, OpOverloadPacket), f"Must pass specific op overload, not overload packet, found {op}"
assert isinstance(op, OpOverload)
torch._C._jit_register_decomposition_for_schema(op._schema, graph)
| pytorch-master | torch/jit/_decomposition_utils.py |
"""Freezing
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
from typing import Optional, List
import torch
from torch.jit._script import RecursiveScriptModule, ScriptModule
def freeze(mod, preserved_attrs: Optional[List[str]] = None, optimize_numerics: bool = True):
r"""
Freezing a :class:`ScriptModule` will clone it and attempt to inline the cloned
module's submodules, parameters, and attributes as constants in the TorchScript IR Graph.
By default, `forward` will be preserved, as well as attributes & methods specified in
`preserved_attrs`. Additionally, any attribute that is modified within a preserved
method will be preserved.
Freezing currently only accepts ScriptModules that are in eval mode.
    Freezing applies generic optimizations that will speed up your model regardless of machine.
To further optimize using server-specific settings, run `optimize_for_inference` after
freezing.
Args:
mod (:class:`ScriptModule`): a module to be frozen
preserved_attrs (Optional[List[str]]): a list of attributes to preserve in addition to the forward method.
Attributes modified in preserved methods will also be preserved.
optimize_numerics (bool): If ``True``, a set of optimization passes will be run that does not strictly
preserve numerics. Full details of optimization can be found at `torch.jit.run_frozen_optimizations`.
Returns:
Frozen :class:`ScriptModule`.
Example (Freezing a simple module with a Parameter):
.. testcode::
import torch
class MyModule(torch.nn.Module):
def __init__(self, N, M):
super(MyModule, self).__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
self.linear = torch.nn.Linear(N, M)
def forward(self, input):
output = self.weight.mm(input)
output = self.linear(output)
return output
scripted_module = torch.jit.script(MyModule(2, 3).eval())
frozen_module = torch.jit.freeze(scripted_module)
# parameters have been removed and inlined into the Graph as constants
assert len(list(frozen_module.named_parameters())) == 0
# See the compiled graph as Python code
print(frozen_module.code)
Example (Freezing a module with preserved attributes)
.. testcode::
import torch
class MyModule2(torch.nn.Module):
def __init__(self):
super(MyModule2, self).__init__()
self.modified_tensor = torch.tensor(10.)
self.version = 1
def forward(self, input):
self.modified_tensor += 1
return input + self.modified_tensor
scripted_module = torch.jit.script(MyModule2().eval())
frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["version"])
# we've manually preserved `version`, so it still exists on the frozen module and can be modified
assert frozen_module.version == 1
frozen_module.version = 2
# `modified_tensor` is detected as being mutated in the forward, so freezing preserves
# it to retain model semantics
assert frozen_module(torch.tensor(1)) == torch.tensor(12)
# now that we've run it once, the next result will be incremented by one
assert frozen_module(torch.tensor(1)) == torch.tensor(13)
Note:
Freezing submodule attributes is also supported:
frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["submodule.version"])
Note:
If you're not sure why an attribute is not being inlined as a constant, you can run
`dump_alias_db` on frozen_module.forward.graph to see if freezing has detected the
attribute is being modified.
Note:
Because freezing makes weights constants and removes module hierarchy, `to` and other
nn.Module methods to manipulate device or dtype no longer work. As a workaround,
You can remap devices by specifying `map_location` in `torch.jit.load`, however
device-specific logic may have been baked into the model.
"""
if not isinstance(mod, ScriptModule):
raise RuntimeError(
"Freezing expects a ScriptModule as input. "
"Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
)
if mod.training:
raise RuntimeError(
"Freezing is currently only implemented for modules in eval mode. "
"Please call .eval() on your module before freezing."
)
preserved_attrs = preserved_attrs if preserved_attrs is not None else []
out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
RecursiveScriptModule._finalize_scriptmodule(out)
preserved_methods = [x for x in preserved_attrs if mod._c._has_method(x)]
run_frozen_optimizations(out, optimize_numerics, preserved_methods)
return out
def run_frozen_optimizations(
mod, optimize_numerics: bool = True, preserved_methods: Optional[List[str]] = None
):
r"""
Runs a series of optimizations looking for patterns that occur in frozen graphs.
The current set of optimizations includes:
- Dropout Removal
- Pretranspose Linear Layers
- Concat Linear Layers with same input Tensor
- Conv -> Batchnorm folding
- Conv -> Add/Sub folding
- Conv -> Mul/Div folding
Args:
mod (:class:`ScriptModule`): a frozen module to be optimized
optimize_numerics (bool): If ``True``, a set of optimization passes will be run that does not strictly
preserve numerics. These optimizations preserve default rtol and atol of `torch.testing.assert_allclose`
when applied on a single transformation, however in a module where many transformations are applied
the rtol or atol may no longer fall within the default `assert_allclose` tolerance. Conv -> Batchnorm folding,
Conv-Add/Sub, and Conv -> Mul/Div folding all may alter numerics.
Returns:
None
Note:
        On rare occasions, this can result in slower execution.
Example (Freezing a module with Conv->Batchnorm)
.. code-block:: python
import torch
in_channels, out_channels = 3, 32
conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
mod = torch.nn.Sequential(conv, bn)
        # set optimize_numerics to False here so that freezing does not fold conv-batchnorm;
        # by default, freezing runs run_frozen_optimizations with optimize_numerics=True
        frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize_numerics=False)
# inspect frozen mod
assert "batch_norm" in str(frozen_mod.graph)
torch.jit.run_frozen_optimizations(frozen_mod)
assert "batch_norm" not in str(frozen_mod.graph)
"""
torch._C._jit_pass_optimize_frozen_graph(mod.graph, optimize_numerics)
if preserved_methods is None:
preserved_methods = []
for method in preserved_methods:
torch._C._jit_pass_optimize_frozen_graph(
mod.__getattr__(method).graph, optimize_numerics
)
def optimize_for_inference(mod: ScriptModule, other_methods: Optional[List[str]] = None) -> ScriptModule:
"""
Performs a set of optimization passes to optimize a model for the
purposes of inference. If the model is not already frozen, optimize_for_inference
will invoke `torch.jit.freeze` automatically.
In addition to generic optimizations that should speed up your model regardless
    of environment, optimize_for_inference will also bake in build-specific settings
such as the presence of CUDNN or MKLDNN, and may in the future make transformations
which speed things up on one machine but slow things down on another. Accordingly,
    serialization is not implemented after invoking `optimize_for_inference` and
is not guaranteed.
    This is still a prototype and may have the potential to slow down your model.
    The primary use cases targeted so far have been vision models on CPU
    and, to a lesser extent, GPU.
Example (optimizing a module with Conv->Batchnorm)::
import torch
in_channels, out_channels = 3, 32
conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
mod = torch.nn.Sequential(conv, bn)
frozen_mod = torch.jit.optimize_for_inference(torch.jit.script(mod.eval()))
assert "batch_norm" not in str(frozen_mod.graph)
# if built with MKLDNN, convolution will be run with MKLDNN weights
assert "MKLDNN" in frozen_mod.graph
"""
if not isinstance(mod, ScriptModule):
raise RuntimeError(
"optimize_for_inference expects a ScriptModule as input. "
"Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'.")
if other_methods is None:
other_methods = []
if hasattr(mod, "training"):
mod = freeze(mod.eval(), preserved_attrs=other_methods)
torch._C._jit_pass_optimize_for_inference(mod._c, other_methods)
return mod
| pytorch-master | torch/jit/_freeze.py |
# These functions are referenced from the pickle archives produced by
# ScriptModule.save()
# These (`build_*`) functions used to be used by `pickler.cpp` to specify
# the type of the list for certain special types, but now all lists get
# a type attached and restored via `restore_type_tag` below. The legacy
# functions should stick around for backwards-compatibility.
def build_intlist(data):
return data
def build_tensorlist(data):
return data
def build_doublelist(data):
return data
def build_boollist(data):
return data
def build_tensor_from_id(data):
if isinstance(data, int):
# just the id, can't really do anything
return data
def restore_type_tag(value, type_str):
# The type_ptr is used by the jit unpickler to restore the full static type
# to container types like list when they are re-loaded, but this doesn't
# matter for Python, so just return the plain value
return value
| pytorch-master | torch/jit/_pickle.py |
import ast
import inspect
import sys
import textwrap
import torch
import warnings
class AttributeTypeIsSupportedChecker(ast.NodeVisitor):
"""
Checks the ``__init__`` method of a given ``nn.Module`` to ensure
that all instance-level attributes can be properly initialized.
Specifically, we do type inference based on attribute values...even
if the attribute in question has already been typed using
Python3-style annotations or ``torch.jit.annotate``. This means that
setting an instance-level attribute to ``[]`` (for ``List``),
    ``{}`` (for ``Dict``), or ``None`` (for ``Optional``) isn't enough
information for us to properly initialize that attribute.
An object of this class can walk a given ``nn.Module``'s AST and
determine if it meets our requirements or not.
Known limitations
1. We can only check the AST nodes for certain constructs; we can't
``eval`` arbitrary expressions. This means that function calls,
class instantiations, and complex expressions that resolve to one of
the "empty" values specified above will NOT be flagged as
problematic.
2. We match on string literals, so if the user decides to use a
non-standard import (e.g. `from typing import List as foo`), we
won't catch it.
Example:
.. code-block:: python
class M(torch.nn.Module):
def fn(self):
return []
def __init__(self):
super().__init__()
self.x: List[int] = []
def forward(self, x: List[int]):
self.x = x
return 1
The above code will pass the ``AttributeTypeIsSupportedChecker``
check since we have a function call in ``__init__``. However,
it will still fail later with the ``RuntimeError`` "Tried to set
nonexistent attribute: x. Did you forget to initialize it in
__init__()?".
Args:
nn_module - The instance of ``torch.nn.Module`` whose
``__init__`` method we wish to check
"""
def check(self, nn_module: torch.nn.Module) -> None:
# Check if we have a Python version <3.8
self.using_deprecated_ast: bool = sys.version_info < (3, 8)
source_lines = inspect.getsource(nn_module.__class__.__init__)
# Ignore comments no matter the indentation
def is_useless_comment(line):
line = line.strip()
return line.startswith("#") and not line.startswith("# type:")
source_lines = "\n".join([l for l in source_lines.split("\n") if not is_useless_comment(l)])
# This AST only contains the `__init__` method of the nn.Module
init_ast = ast.parse(textwrap.dedent(source_lines))
# Get items annotated in the class body
self.class_level_annotations = list(nn_module.__annotations__.keys())
# Flag for later
self.visiting_class_level_ann = False
self.visit(init_ast)
def _is_empty_container(self, node: ast.AST, ann_type: str) -> bool:
if ann_type == "List":
# Assigning `[]` to a `List` type gives you a Node where
# value=List(elts=[], ctx=Load())
if not isinstance(node, ast.List):
return False
if node.elts:
return False
elif ann_type == "Dict":
# Assigning `{}` to a `Dict` type gives you a Node where
# value=Dict(keys=[], values=[])
if not isinstance(node, ast.Dict):
return False
if node.keys:
return False
elif ann_type == "Optional":
# Assigning `None` to an `Optional` type gives you a
# Node where value=Constant(value=None, kind=None)
# or, in Python <3.8, value=NameConstant(value=None)
if (not self.using_deprecated_ast
and not isinstance(node, ast.Constant)):
return False
if (self.using_deprecated_ast
and not isinstance(node, ast.NameConstant)):
return False
if node.value: # type: ignore[attr-defined]
return False
return True
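    # Illustrative examples of what `_is_empty_container` flags as "empty":
    #   self.a: List[int] = []          -> empty (element type cannot be inferred)
    #   self.b: Dict[str, int] = {}     -> empty
    #   self.c: Optional[int] = None    -> empty
    #   self.d: List[int] = [0]         -> not empty; type inference can proceed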
def visit_Assign(self, node):
"""
If we're visiting a Call Node (the right-hand side of an
assignment statement), we won't be able to check the variable
that we're assigning to (the left-hand side of an assignment).
Because of this, we need to store this state in visitAssign.
(Luckily, we only have to do this if we're assigning to a Call
Node, i.e. ``torch.jit.annotate``. If we're using normal Python
annotations, we'll be visiting an AnnAssign Node, which has its
target built in.)
"""
try:
if (isinstance(node.value, ast.Call)
and node.targets[0].attr in self.class_level_annotations):
self.visiting_class_level_ann = True
except AttributeError:
return
self.generic_visit(node)
self.visiting_class_level_ann = False
def visit_AnnAssign(self, node):
"""
Visit an AnnAssign node in an ``nn.Module``'s ``__init__``
method and see if it conforms to our attribute annotation rules.
"""
# If we have a local variable
try:
if node.target.value.id != "self":
return
except AttributeError:
return
# If we have an attribute that's already been annotated at the
# class level
if node.target.attr in self.class_level_annotations:
return
# TODO @ansley: add `Union` once landed
# NB: Even though `Tuple` is a "container", we don't want to
        # check for it here. `Tuple` functions as a type with an
        # "infinite" number of subtypes, in the sense that you can have
        # `Tuple[()]`, `Tuple[T1]`, `Tuple[T2]`, `Tuple[T1, T2]`,
# `Tuple[T2, T1]` and so on, and none of these subtypes can be
# used in place of the other. Therefore, assigning an empty
# tuple in `__init__` CORRECTLY means that that variable
# cannot be reassigned later to a non-empty tuple. Same
# deal with `NamedTuple`
containers = {"List", "Dict", "Optional"}
# If we're not evaluating one of the specified problem types
try:
if node.annotation.value.id not in containers:
return
except AttributeError:
# To evaluate a base type (`str`, `int`, etc.), we would
# have needed to get the name through `node.annotation.id`
# instead of `node.annotation.value.id`. Seems that we're
# not evaluating one of our "containers"
return
# Check if the assigned variable is empty
ann_type = node.annotation.value.id
if not self._is_empty_container(node.value, ann_type):
return
warnings.warn("The TorchScript type system doesn't support "
"instance-level annotations on empty non-base "
"types in `__init__`. Instead, either 1) use a "
"type annotation in the class body, or 2) wrap "
"the type in `torch.jit.Attribute`.")
def visit_Call(self, node):
"""
Visit a Call node in an ``nn.Module``'s ``__init__``
method and determine if it's ``torch.jit.annotate``. If so,
see if it conforms to our attribute annotation rules.
"""
# If we have an attribute that's already been annotated at the
# class level
if self.visiting_class_level_ann:
return
# If this isn't a call to `torch.jit.annotate`
try:
if (node.func.value.value.id != "torch"
or node.func.value.attr != "jit"
or node.func.attr != "annotate"):
self.generic_visit(node)
elif (node.func.value.value.id != "jit"
or node.func.value.attr != "annotate"):
self.generic_visit(node)
except AttributeError:
# Looks like we didn't even have the right node structure
# to check for `torch.jit.annotate` in the first place
self.generic_visit(node)
# Invariant: we have a `torch.jit.annotate` or a
# `torch.annotate` call
# A Call Node for `torch.jit.annotate` should have an `args`
# list of length 2 where args[0] represents the annotation and
# args[1] represents the actual value
if len(node.args) != 2:
return
if not isinstance(node.args[0], ast.Subscript):
return
# See notes in `visit_AnnAssign` r.e. containers
containers = {"List", "Dict", "Optional"}
try:
ann_type = node.args[0].value.id # type: ignore[attr-defined]
except AttributeError:
return
if ann_type not in containers:
return
# Check if the assigned variable is empty
if not self._is_empty_container(node.args[1], ann_type):
return
warnings.warn("The TorchScript type system doesn't support "
"instance-level annotations on empty non-base "
"types in `__init__`. Instead, either 1) use a "
"type annotation in the class body, or 2) wrap "
"the type in `torch.jit.Attribute`.")
| pytorch-master | torch/jit/_check.py |
import torch.jit
from textwrap import dedent
from typing import Dict, Any
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def _gen_unsupported_methods_properties():
tensor_attrs = set(filter(lambda x: x[0] != "_", dir(torch.Tensor)))
tensor = torch.tensor([2])
funcs_template = dedent('''
def func(x):
return x.{op}()
''')
deprecated_apis = set(["volatile", "resize", "reinforce", "new", "name", "map2_", "has_names", "grad_fn", "resize_as"])
tensor_attrs = tensor_attrs - deprecated_apis
properties = []
methods = []
sorted_tensor_attrs = sorted(list(tensor_attrs), key=lambda x: x.lower())
for attr in sorted_tensor_attrs:
funcs_str = funcs_template.format(op=attr)
scope: Dict[str, Any] = {}
execWrapper(funcs_str, globals(), scope)
try:
cu = torch.jit.CompilationUnit(funcs_str)
except Exception as e:
if "nonexistent attribute" not in repr(e):
continue
attr_repr = repr(getattr(tensor, attr))
if "bound method" in attr_repr or "built-in method" in attr_repr:
methods.append(attr)
else:
properties.append(attr)
mapped_methods = ("\t* :meth:`~torch.Tensor." + x + r"`" for x in methods)
mapped_properties = ("\t* :attr:`~torch.Tensor." + x + r"`" for x in properties)
return "\n".join(mapped_methods), "\n".join(mapped_properties)
def _list_unsupported_tensor_ops():
header = """\n\n
Unsupported Tensor Methods
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
methods, properties = _gen_unsupported_methods_properties()
return header + "\n" + methods + """
Unsupported Tensor Properties
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""" + "\n" + properties
__doc__ = _list_unsupported_tensor_ops()
| pytorch-master | torch/jit/unsupported_tensor_ops.py |
import torch
import sys
import ast
import dataclasses
import inspect
import string
import re
from collections import namedtuple
from textwrap import dedent
from typing import List, Tuple # noqa: F401
from torch._C._jit_tree_views import (
ClassDef, Ident, Stmt, Decl, Def, Var,
EmptyTypeAnnotation, Param, ExprStmt, Assign,
Delete, Return, Raise, Assert, AugAssign, While,
For, If, Pass, Break, Continue, Apply, Dots, Select,
TrueLiteral, FalseLiteral, NoneLiteral, Starred,
ListLiteral, TupleLiteral, DictLiteral, Const,
StringLiteral, ListComp, Attribute, BinOp, UnaryOp,
SliceExpr, Subscript, TernaryIf, With, WithItem, Property,
DictComp,
)
from torch._sources import get_source_lines_and_file, parse_def, make_source_context
from torch._sources import ParsedDef as _ParsedDef
from torch.jit._dataclass_impls import DATACLASS_MAGIC_METHODS
from torch.jit._monkeytype_config import monkeytype_trace, get_qualified_name
from torch._jit_internal import should_drop, is_static_fn, FunctionModifiers # noqa: F401
from torch import _jit_internal
import torch.jit.annotations
_IS_ASTUNPARSE_INSTALLED = False
try:
import astunparse # type: ignore[import]
_IS_ASTUNPARSE_INSTALLED = True
except ImportError:
pass
# Borrowed from cPython implementation
# https://github.com/python/cpython/blob/561612d8456cfab5672c9b445521113b847bd6b3/Lib/textwrap.py#L411#
_reserved_prefix = '__jit'
_reserved_names = {'print'}
_identifier_chars = set(string.ascii_lowercase + string.ascii_uppercase + string.digits)
def is_reserved_name(name):
return name.startswith(_reserved_prefix) or name in _reserved_names
pretty_node_names = {
ast.FunctionDef: "function definitions",
ast.For: "for loops",
ast.Delete: "del statements",
ast.ClassDef: "class definitions",
ast.With: "with statements",
ast.Raise: "raise statements",
ast.Assert: "assertions",
ast.Import: "import statements",
ast.ImportFrom: "import statements",
ast.Global: "global variables",
ast.Break: "break statements",
ast.Continue: "continue statements",
}
node_start_tokens = {
ast.FunctionDef: "def",
ast.For: "for",
ast.Delete: "del",
ast.ClassDef: "class",
ast.With: "with",
ast.Raise: "raise",
ast.Assert: "assert",
ast.Import: "import",
ast.ImportFrom: "from",
ast.Global: "global",
ast.Break: "break",
ast.Continue: "continue",
}
pretty_node_names.update({
ast.AsyncFunctionDef: "async function definitions",
ast.AsyncFor: "async for loops",
ast.AsyncWith: "async with statements",
ast.Try: "try blocks",
ast.Nonlocal: "nonlocal variables",
})
node_start_tokens.update({
ast.AsyncFunctionDef: "async def",
ast.AsyncFor: "async for",
ast.AsyncWith: "async with",
ast.Try: "try",
ast.Nonlocal: "nonlocal",
})
if sys.version_info >= (3, 6):
pretty_node_names.update({
ast.AnnAssign: "annotated assignments",
})
# NB: no specific token for AnnAssign
class FrontendError(Exception):
def __init__(self, source_range, msg):
self.source_range = source_range
self.msg = msg
# This has to be instantiated here so the ErrorReport is accurate to the
# call stack when the FrontendError was raised
self.error_report = torch._C.ErrorReport(self.source_range)
def __str__(self):
return self.msg + self.error_report.what().lstrip()
class NotSupportedError(FrontendError):
pass
class UnsupportedNodeError(NotSupportedError):
def __init__(self, ctx, offending_node, reason=''):
# If we don't have a specific token, we default to length of 1
node_type = type(offending_node)
range_len = len(node_start_tokens.get(node_type, ' '))
source_range = ctx.make_range(offending_node.lineno,
offending_node.col_offset,
offending_node.col_offset + range_len)
feature_name = pretty_node_names.get(node_type, node_type.__name__)
msg = "{} {}aren't supported".format(feature_name, reason + ' ' if reason else '')
super(UnsupportedNodeError, self).__init__(source_range, msg)
class FrontendTypeError(FrontendError):
pass
def build_withitems(ctx, items):
items = [build_withitem(ctx, i) for i in items]
return list(items)
def build_stmts(ctx, stmts):
stmts = [build_stmt(ctx, s) for s in stmts]
return list(filter(None, stmts))
def get_class_properties(cls, self_name):
"""
Get a list of Property objects representing the properties of a class.
Args:
cls: The class to get properties of.
self_name: The name of the class that the properties should belong to.
Returns:
A list of Property objects corresponding to the properties of cls. Property
here refers to the subclass of TreeView.
"""
props = inspect.getmembers(
cls, predicate=lambda m: isinstance(m, property))
    # Any property that should not be compiled must be in this list on the Module.
unused_properties = getattr(cls, "__jit_unused_properties__", [])
# Create Property TreeView objects from inspected property objects.
properties = []
for prop in props:
if prop[0] not in unused_properties and not should_drop(prop[1].fget):
getter = get_jit_def(prop[1].fget, f"__{prop[0]}_getter", self_name=self_name)
setter = get_jit_def(prop[1].fset, f"__{prop[0]}_setter", self_name=self_name) if prop[1].fset else None
properties.append(Property(getter.range(), Ident(getter.range(), prop[0]), getter, setter))
return properties
def get_class_assigns(ctx, cls_ast):
assigns = []
def maybe_build_assign(builder, entry):
nonlocal assigns
try:
assigns.append(builder(ctx, entry))
except NotSupportedError:
pass
for entry in cls_ast.body:
if isinstance(entry, ast.Assign):
maybe_build_assign(StmtBuilder.build_Assign, entry)
elif isinstance(entry, ast.AnnAssign):
maybe_build_assign(StmtBuilder.build_AnnAssign, entry)
return assigns
def get_jit_class_def(cls, self_name):
# Get defs for each method within the current class independently
# TODO: proper overriding analysis when implementing class inheritance
methods = inspect.getmembers(
cls,
predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m))
and not is_static_fn(cls, m.__name__)
and m.__name__ in cls.__dict__
)
def is_classmethod(fn):
return inspect.ismethod(fn) and getattr(fn, "__self__", None) == cls
# Get and parse the source code for this class
sourcelines, file_lineno, filename = get_source_lines_and_file(cls, torch._C.ErrorReport.call_stack())
source = ''.join(sourcelines)
dedent_src = dedent(source)
py_ast = ast.parse(dedent_src)
class_ast = py_ast.body[0]
assert isinstance(class_ast, ast.ClassDef)
# Special case for dataclasses. In general we need access to the source code for
# an object in order to JIT compile it. But the dataclasses module dynamically synthesizes
# magic methods for classes, and we can't get the source code for these methods. As a
# workaround, we synthesize TorchScript-friendly implementations ourselves.
if dataclasses.is_dataclass(cls):
# Detect whether the user manually implemented any of the magic methods. If they did,
# we don't want to synthesize/override them.
overrides = {
method.name
for method in class_ast.body
if isinstance(method, ast.FunctionDef) and method.name in DATACLASS_MAGIC_METHODS
}
for i, (name, _) in enumerate(methods):
# Is this a magic method we can synthesize?
synthesizer_fn = DATACLASS_MAGIC_METHODS.get(name)
if synthesizer_fn and name not in overrides:
parsed_def = synthesizer_fn(cls)
methods[i] = name, parsed_def
func = getattr(cls, name)
_jit_internal.loader.cache(func, parsed_def.source)
method_defs = [
get_jit_def(obj, name, self_name=self_name, is_classmethod=is_classmethod(obj))
for (name, obj) in methods
]
properties = get_class_properties(cls, self_name)
leading_whitespace_len = len(source.split('\n', 1)[0]) - len(dedent_src.split('\n', 1)[0])
ctx = make_source_context(source, filename, file_lineno, leading_whitespace_len, False)
assigns = get_class_assigns(ctx, class_ast)
return build_class_def(ctx, class_ast, method_defs, properties, self_name, assigns)
def get_jit_def(fn, def_name, self_name=None, is_classmethod=False):
"""
Build a JIT AST (TreeView) from the given function.
Args:
fn: A function object to compile or a pre-parsed ParsedDef object
def_name: The name to give to the resulting AST object. This is not
always the same as `fn.__name__`, for example:
def _forward(self):
...
forward = _forward
In this case, the `__name__` attribute of the function object is "_forward",
            but we want the resulting AST to have the name "forward".
        self_name: If this function is a method, what the type name of `self` is.
        is_classmethod: Whether `fn` is a classmethod; if so, its first argument is bound
            to the class type given by `self_name`.
"""
parsed_def = parse_def(fn) if not isinstance(fn, _ParsedDef) else fn
type_line = torch.jit.annotations.get_type_line(parsed_def.source)
fn_def = parsed_def.ast.body[0]
if is_classmethod:
arg_name = fn_def.args.args[0].arg
# Insert a statement that assigns the first argument to the class
assign_stmt = ast.parse(f"{arg_name} = {self_name}").body[0]
fn_def.body.insert(0, assign_stmt)
# Swap out the function signature and body if it is unused
if should_drop(fn):
unused_fn_def = ast.parse("def unused_fn(self: Any):\n\traise RuntimeError(\"Cannot call @unused methods\")")
if len(unused_fn_def.body) != 1 or not isinstance(unused_fn_def.body[0], ast.FunctionDef):
raise RuntimeError(f"Expected a single top-level function: {parsed_def.filename}:{parsed_def.file_lineno}")
unused_def = unused_fn_def.body[0]
fn_def.body = unused_def.body
# kwarg/vararg not supported by `build_def`
fn_def.args.kwarg = fn_def.args.vararg = None
for arg in fn_def.args.args + fn_def.args.kwonlyargs:
# Replace potentially unsupported type annotations by "Any"
arg.annotation = unused_def.args.args[0].annotation
# If MonkeyType is installed, get all the consolidated type traces
# for the arguments from type_trace_db
type_trace_db = torch.jit._script._get_type_trace_db()
pdt_arg_types = None
if monkeytype_trace and not isinstance(fn, _ParsedDef):
qualname = get_qualified_name(fn)
pdt_arg_types = type_trace_db.get_args_types(qualname)
return build_def(parsed_def.ctx, fn_def, type_line, def_name, self_name=self_name, pdt_arg_types=pdt_arg_types)
# TODO: more robust handling of recognizing ignore context manager
def is_torch_jit_ignore_context_manager(stmt):
# checks if the statement is torch.jit.ignore context manager
if isinstance(stmt.items[0].context_expr, ast.Call):
# extract torch part
function = stmt.items[0].context_expr.func
if isinstance(function, ast.Attribute):
attr_name = function.attr
attr_value = function.value
if attr_name == "_IgnoreContextManager" and isinstance(attr_value, ast.Attribute):
                # there should be at most two nested attributes (e.g. torch.jit._IgnoreContextManager)
if attr_value.attr == "jit" and isinstance(attr_value.value, ast.Name):
if attr_value.value.id == "torch":
return True
return False
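# Hedged illustration (added for exposition, not part of the original frontend; the demo
# name is assumed): the shape of source the predicate above is meant to match.
def _ignore_ctx_manager_check_demo():
    src = "with torch.jit._IgnoreContextManager(x='inp:int'):\n    pass\n"
    with_stmt = ast.parse(src).body[0]
    # Expected to be True: the context expression is a call on torch.jit._IgnoreContextManager
    return is_torch_jit_ignore_context_manager(with_stmt)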
class Builder(object):
def __call__(self, ctx, node):
method = getattr(self, 'build_' + node.__class__.__name__, None)
if method is None:
raise UnsupportedNodeError(ctx, node)
return method(ctx, node)
def build_class_def(ctx, py_def, methods, properties, self_name, assigns):
r = ctx.make_range(py_def.lineno, py_def.col_offset,
py_def.col_offset + len("class"))
return ClassDef(Ident(r, self_name), [Stmt(method) for method in methods], properties, assigns)
def build_def(ctx, py_def, type_line, def_name, self_name=None, pdt_arg_types=None):
body = py_def.body
r = ctx.make_range(py_def.lineno + len(py_def.decorator_list),
py_def.col_offset,
py_def.col_offset + len("def"))
param_list = build_param_list(ctx, py_def.args, self_name, pdt_arg_types)
return_type = None
if getattr(py_def, 'returns', None) is not None:
return_type = build_expr(ctx, py_def.returns)
decl = Decl(r, param_list, return_type)
is_method = self_name is not None
if type_line is not None:
type_comment_decl = torch._C.parse_type_comment(type_line)
decl = torch._C.merge_type_from_type_comment(decl, type_comment_decl, is_method)
return Def(Ident(r, def_name),
decl,
build_stmts(ctx, body))
_vararg_kwarg_err = ("Compiled functions can't take variable number of arguments "
"or use keyword-only arguments with defaults")
def build_param_list(ctx, py_args, self_name, pdt_arg_types=None):
if py_args.kwarg is not None:
expr = py_args.kwarg
ctx_range = ctx.make_range(expr.lineno, expr.col_offset - 1, expr.col_offset + len(expr.arg))
raise NotSupportedError(ctx_range, _vararg_kwarg_err)
if py_args.vararg is not None:
expr = py_args.vararg
ctx_range = ctx.make_range(expr.lineno, expr.col_offset - 1, expr.col_offset + len(expr.arg))
raise NotSupportedError(ctx_range, _vararg_kwarg_err)
if len(py_args.kw_defaults) > 0:
# kw_defaults is a list of the values for the kwargs (which default to None),
# so they don't actually have line numbers.
for arg in py_args.kw_defaults:
if arg is not None:
ctx_range = build_expr(ctx, arg).range()
raise NotSupportedError(ctx_range, _vararg_kwarg_err)
    # List of (arg, type) tuples, with the type as inferred by profile-directed typing
arg_and_types = [(arg, pdt_arg_types[arg.arg] if pdt_arg_types and bool(pdt_arg_types[arg.arg]) else None)
for arg in py_args.args]
arg_and_types_kwonlyargs = [(arg, pdt_arg_types[arg.arg] if pdt_arg_types and bool(pdt_arg_types[arg.arg])
else None) for arg in py_args.kwonlyargs]
result = [build_param(ctx, arg, self_name, kwarg_only=False, pdt_arg_type=arg_type)
for arg, arg_type in arg_and_types]
result += [build_param(ctx, arg, self_name, kwarg_only=True, pdt_arg_type=arg_type)
for arg, arg_type in arg_and_types_kwonlyargs]
return result
def build_param(ctx, py_arg, self_name, kwarg_only, pdt_arg_type=None):
# NB: In Python3 py_arg is a pair of (str arg, expr? annotation)
name = py_arg.arg
r = ctx.make_range(py_arg.lineno, py_arg.col_offset, py_arg.col_offset + len(name))
if getattr(py_arg, 'annotation', None) is not None:
annotation_expr = build_expr(ctx, py_arg.annotation)
elif pdt_arg_type:
annotation_expr = Var(Ident(r, pdt_arg_type))
elif self_name is not None and name == 'self':
annotation_expr = Var(Ident(r, self_name))
else:
annotation_expr = EmptyTypeAnnotation(r)
return Param(annotation_expr, Ident(r, name), kwarg_only)
def build_ignore_context_manager(ctx, stmt):
InputType = namedtuple('InputType', ['name', 'ann'])
OutputType = namedtuple('OutputType', ['name', 'ann'])
def process_ins_outs(args):
# parse the context manager to figure out inputs and outputs
# with their annotated types
# TODO: add input, output validator
inputs = []
outputs = []
for arg in args:
var_name = arg.arg
if sys.version_info < (3, 8):
                # ast.Str is deprecated starting with Python 3.8
var_ann = arg.value.s
else:
var_ann = arg.value.value
var_decl_type, var_ann = var_ann.split(":")
if var_decl_type == "inp":
inputs.append(InputType(var_name, var_ann))
if var_decl_type == "out":
outputs.append(OutputType(var_name, var_ann))
return inputs, outputs
def create_unique_name_ext(ctx, stmt):
# extension will be based on the full path filename plus
# the line number of original context manager
fn = re.sub(r'[^a-zA-Z0-9_]', '_', ctx.filename)
return f"{fn}_{stmt.lineno}"
def build_return_ann_stmt(outputs):
return_type_ann = ""
return_statement_str = "return "
if len(outputs) == 0:
return_type_ann += " -> None"
if len(outputs) == 1:
return_type_ann = " -> " + outputs[0].ann
return_statement_str += outputs[0].name
if len(outputs) > 1:
return_type_ann = " -> Tuple"
return_type_ann += "[" + ", ".join([var.ann for var in outputs]) + "]"
return_statement_str += ", ".join([var.name for var in outputs])
return return_type_ann, return_statement_str
def build_args(args):
return ", ".join([arg.name for arg in args])
inputs, outputs = process_ins_outs(stmt.items[0].context_expr.keywords)
# build the replacement function str with given inputs and outputs
ignore_function_name = "func_ignore_" + create_unique_name_ext(ctx, stmt)
ignore_function_str = "\ndef " + ignore_function_name
ignore_function_str += "(" + ", ".join([var.name + " :" + var.ann for var in inputs]) + ")"
return_ann, return_stmt = build_return_ann_stmt(outputs)
ignore_function_str += return_ann + ": pass"
    # first create the FunctionDef object from just the declaration
ignore_function = ast.parse(ignore_function_str).body[0]
# dump the body of context manager to dummy function
ignore_function.body = stmt.body # type: ignore[attr-defined]
# insert return statement to the function
return_stmt = ast.parse(return_stmt).body[0]
ignore_function.body.append(return_stmt) # type: ignore[attr-defined]
# registers the custom function in the global context
ignore_func_str = "@torch.jit.ignore\n" + astunparse.unparse(ignore_function)
ignore_func_str += "\nglobals()[\"{}\"] = {}".format(ignore_function_name, ignore_function_name)
exec(ignore_func_str) # noqa: P204
# build the statements as:
# <out_1>, <out_2>, ... = torch.jit.frontend.<func>(<in_1>, <in_2>)
assign_str_lhs = build_args(outputs)
# this function will be registered in torch.jit.frontend module by default
assign_str_rhs = "torch.jit.frontend.{}(".format(ignore_function_name) + build_args(inputs) + ")"
if len(outputs) > 0:
assign_str = assign_str_lhs + " = " + assign_str_rhs
else:
assign_str = assign_str_rhs
assign_ast = ast.parse(assign_str).body[0]
return assign_ast
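# Hedged illustration (exposition only, not part of the original source): for a block such as
#     with torch.jit._IgnoreContextManager(x="inp:int", y="out:int"):
#         y = x + 1
# the builder above synthesizes, roughly, a module-level helper
#     @torch.jit.ignore
#     def func_ignore_<ext>(x :int) -> int: ...
# whose body is the original `with` body plus a trailing `return y`, registers it in the
# globals of torch.jit.frontend, and replaces the statement with
#     y = torch.jit.frontend.func_ignore_<ext>(x)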
def get_default_args(fn):
if fn is None:
return {}
signature = inspect.signature(fn)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
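# Hedged demo (not part of the original module; the demo name is assumed): shows the
# mapping `get_default_args` produces for a toy function.
def _default_args_demo():
    def _toy(a, b=2, *, flag=True):
        return a, b, flag
    # Expected: {'b': 2, 'flag': True}; `a` has no default, so it is omitted.
    return get_default_args(_toy)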
def get_default_args_for_class(cls):
"""
Get default arguments for all methods in a class (except for static methods).
Args:
cls: type - The class type to inspect for default arguments.
Returns:
A Dict[str, Dict[str, Any]] which maps each method name to a Dict[str, Any]
that maps each argument name to its default value.
"""
# Get methods (except static methods because those are compiled separately as
# if they were independent script functions).
methods = inspect.getmembers(
cls,
predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m))
and not is_static_fn(cls, m.__name__)
and m.__name__ in cls.__dict__
)
# Get method defaults. Property defaults do not need to be considered
# because setters cannot be invoked without a value.
defaults = {method_name: get_default_args(method_impl) for method_name, method_impl in methods}
return defaults
class WithItemBuilder(Builder):
@staticmethod
def build_withitem(ctx, item):
lineno = item.context_expr.lineno
start = item.context_expr.col_offset
end = start + len(pretty_node_names[ast.With])
op_vars = item.optional_vars
r = ctx.make_range(lineno, start, end)
return WithItem(r, build_expr(ctx, item.context_expr), build_expr(ctx, op_vars) if op_vars else None)
class StmtBuilder(Builder):
augassign_map = {
ast.Add: '+',
ast.Sub: '-',
ast.Mult: '*',
ast.Div: '/',
ast.Mod: '%',
ast.BitOr: '|',
ast.BitAnd: '&',
ast.BitXor: '^',
ast.LShift: '<<',
ast.RShift: '>>',
ast.Pow: '**',
}
@staticmethod
def build_Expr(ctx, stmt):
value = stmt.value
if value.__class__.__name__ == 'Str':
# If a statement is a string literal expression,
# then it is a docstring. Just ignore it.
return None
else:
return ExprStmt(build_expr(ctx, value))
@staticmethod
def build_Assign(ctx, stmt):
rhs = build_expr(ctx, stmt.value)
lhs = [build_expr(ctx, x) for x in stmt.targets]
return Assign(lhs, rhs)
@staticmethod
def build_AnnAssign(ctx, stmt):
if stmt.value is None:
raise UnsupportedNodeError(ctx, stmt, reason='without assigned value')
# Disallow type annotations on instance attributes outside of __init__
if type(stmt.target) == ast.Attribute and \
stmt.target.value.id == "self" and ctx.funcname != "__init__": # type: ignore[attr-defined]
start = stmt.col_offset
end = start + len(f"self.{stmt.target.attr}")
if hasattr(stmt.annotation, 'id'):
end += len(f": {stmt.annotation.id}")
sr = ctx.make_range(stmt.lineno, start, end)
raise ValueError("Type annotations on instance attributes must be declared in "
f"__init__, not '{ctx.funcname}': {sr}")
rhs = build_expr(ctx, stmt.value)
lhs = build_expr(ctx, stmt.target)
the_type = build_expr(ctx, stmt.annotation)
return Assign([lhs], rhs, the_type)
@staticmethod
def build_Delete(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("del"))
return Delete(r, [build_expr(ctx, target) for target in stmt.targets])
@staticmethod
def build_Return(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("return"))
return Return(r, None if stmt.value is None else build_expr(ctx, stmt.value))
@staticmethod
def build_Raise(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("raise"))
expr = build_expr(ctx, stmt.exc)
return Raise(r, expr)
@staticmethod
def build_Assert(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("assert"))
test = build_expr(ctx, stmt.test)
msg = build_expr(ctx, stmt.msg) if stmt.msg is not None else None
return Assert(r, test, msg)
@staticmethod
def build_AugAssign(ctx, stmt):
lhs = build_expr(ctx, stmt.target)
rhs = build_expr(ctx, stmt.value)
op = type(stmt.op)
if op in StmtBuilder.augassign_map:
op_token = StmtBuilder.augassign_map[op]
else:
raise NotSupportedError(
find_before(ctx, rhs.range().start, '=', offsets=(-1, 0)),
"unsupported kind of augumented assignment: " + op.__name__)
return AugAssign(lhs, op_token, rhs)
@staticmethod
def build_While(ctx, stmt):
if stmt.orelse:
# TODO: try to recover the location of else:? Python doesn't give us useful
# annotations in this case
raise NotSupportedError(None, "else branches of while loops aren't supported")
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("while"))
return While(r, build_expr(ctx, stmt.test),
build_stmts(ctx, stmt.body))
@staticmethod
def build_For(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("for"))
if stmt.orelse:
raise NotSupportedError(r, "else branches of for loops aren't supported")
return For(
r, [build_expr(ctx, stmt.target)],
[build_expr(ctx, stmt.iter)], build_stmts(ctx, stmt.body))
@staticmethod
def build_If(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("if"))
return If(r, build_expr(ctx, stmt.test),
build_stmts(ctx, stmt.body),
build_stmts(ctx, stmt.orelse))
@staticmethod
def build_Print(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("print"))
if stmt.dest:
raise NotSupportedError(r, "print statements with non-default destinations aren't supported")
args = [build_expr(ctx, val) for val in stmt.values]
return ExprStmt(Apply(Var(Ident(r, "print")), args, []))
@staticmethod
def build_Pass(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("pass"))
return Pass(r)
@staticmethod
def build_Break(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("break"))
return Break(r)
@staticmethod
def build_Continue(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("continue"))
return Continue(r)
@staticmethod
def build_With(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset + len("with"))
# Handle ignore context manager
if is_torch_jit_ignore_context_manager(stmt):
if not _IS_ASTUNPARSE_INSTALLED:
raise RuntimeError("torch.jit._IgnoreContextManager requires installing Python library `astunparse`,\
please install it in your Python environment")
assign_ast = build_ignore_context_manager(ctx, stmt)
return build_stmt(ctx, assign_ast)
return With(r, build_withitems(ctx, stmt.items), build_stmts(ctx, stmt.body))
class ExprBuilder(Builder):
binop_map = {
ast.Add: '+',
ast.Sub: '-',
ast.Mult: '*',
ast.Div: '/',
ast.Pow: '**',
ast.Mod: '%',
ast.FloorDiv: '//',
ast.BitAnd: '&',
ast.BitXor: '^',
ast.BitOr: '|',
ast.LShift: '<<',
ast.RShift: '>>',
}
binop_map[ast.MatMult] = '@'
unop_map = {
ast.Not: 'not',
ast.USub: '-',
ast.Invert: '~',
}
boolop_map = {
ast.And: 'and',
ast.Or: 'or',
}
cmpop_map = {
ast.Eq: '==',
ast.NotEq: '!=',
ast.LtE: '<=',
ast.Lt: '<',
ast.GtE: '>=',
ast.Gt: '>',
ast.Is: 'is',
ast.IsNot: 'is not',
ast.In: 'in',
ast.NotIn: 'not in',
}
@staticmethod
def build_Attribute(ctx, expr):
base = build_expr(ctx, expr.value)
        # expr.attr is just a string, so it's not annotated in any way; we have
        # to build the range manually
source = ctx.source.encode('utf-8')
def get_char(index):
return chr(source[index])
start_pos = base.range().end + 1
while get_char(start_pos) in string.whitespace: # Skip whitespace
start_pos += 1
end_pos = start_pos + len(expr.attr)
name_range = ctx.make_raw_range(start_pos, end_pos)
return Select(base, Ident(name_range, expr.attr))
@staticmethod
def build_Call(ctx, expr):
func = build_expr(ctx, expr.func)
args = [build_expr(ctx, py_arg) for py_arg in expr.args]
if hasattr(expr, 'starargs') and expr.starargs:
stararg_expr = build_expr(ctx, expr.starargs)
args += [Starred(stararg_expr.range(), stararg_expr)]
kwargs = []
for kw in expr.keywords:
kw_expr = build_expr(ctx, kw.value)
# XXX: we could do a better job at figuring out the range for the name here
if not kw.arg:
raise NotSupportedError(kw_expr.range(), 'keyword-arg expansion is not supported')
kwargs.append(Attribute(Ident(kw_expr.range(), kw.arg), kw_expr))
return Apply(func, args, kwargs)
@staticmethod
def build_Ellipsis(ctx, expr):
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 3) # len("...") == 3
return Dots(r)
@staticmethod
def build_Name(ctx, expr):
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(expr.id))
if expr.id.startswith(_reserved_prefix):
raise NotSupportedError(r, "names of variables used in JIT-ed functions "
"can't start with " + _reserved_prefix)
if expr.id == "True":
return TrueLiteral(r)
elif expr.id == "False":
return FalseLiteral(r)
elif expr.id == "None":
return NoneLiteral(r)
elif expr.id == "Ellipsis":
return Dots(r)
return Var(Ident(r, expr.id))
@staticmethod
def build_NameConstant(ctx, expr):
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(str(expr.value)))
if expr.value is True:
return TrueLiteral(r)
elif expr.value is False:
return FalseLiteral(r)
elif expr.value is None:
return NoneLiteral(r)
elif expr.value == Ellipsis:
return Dots(r)
else:
raise ValueError("Name constant value unsupported: " + str(expr.value))
@staticmethod
def build_BinOp(ctx, expr):
lhs = build_expr(ctx, expr.left)
rhs = build_expr(ctx, expr.right)
op = type(expr.op)
if op == ast.Div and not ctx.uses_true_division:
err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start)
raise FrontendError(err_range, 'Division of ints in TorchScript uses Python 3 true '
'division semantics. Please put `from __future__ '
'import division` at the top of your file')
op_token = ExprBuilder.binop_map.get(op)
if op_token is None:
err_range = ctx.make_raw_range(lhs.range().end, rhs.range().start)
raise NotSupportedError(err_range, "unsupported binary operator: " + op.__name__)
return BinOp(op_token, lhs, rhs)
@staticmethod
def build_UnaryOp(ctx, expr):
sub_expr = build_expr(ctx, expr.operand)
op = type(expr.op)
op_token = ExprBuilder.unop_map.get(op)
if op_token is None:
raise NotSupportedError(expr.range(), "unsupported unary operator: " + op.__name__)
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(op_token))
return UnaryOp(r, op_token, sub_expr)
@staticmethod
def build_BoolOp(ctx, expr):
if len(expr.values) < 2:
raise AssertionError("expected at least 2 values in BoolOp, but got " + str(len(expr.values)))
sub_exprs = [build_expr(ctx, sub_expr) for sub_expr in expr.values]
op = type(expr.op)
op_token = ExprBuilder.boolop_map.get(op)
if op_token is None:
err_range = ctx.make_raw_range(sub_exprs[0].range().end, sub_exprs[1].range().start)
raise NotSupportedError(err_range, "unsupported boolean operator: " + op.__name__)
lhs = sub_exprs[0]
for rhs in sub_exprs[1:]:
lhs = BinOp(op_token, lhs, rhs)
return lhs
@staticmethod
def build_IfExp(ctx, expr):
return TernaryIf(build_expr(ctx, expr.test),
build_expr(ctx, expr.body),
build_expr(ctx, expr.orelse))
@staticmethod
def build_Compare(ctx, expr):
operands = [build_expr(ctx, e) for e in [expr.left] + list(expr.comparators)]
result = None
for lhs, op_, rhs in zip(operands, expr.ops, operands[1:]):
op = type(op_)
op_token = ExprBuilder.cmpop_map.get(op)
r = ctx.make_raw_range(lhs.range().end, rhs.range().start)
if op_token is None:
raise NotSupportedError(r, "unsupported comparison operator: " + op.__name__)
if op == ast.NotIn:
                # NB: `not in` is just `not (... in ...)`, so we don't introduce a new tree view
                # node but just represent it as a nested call in our tree view structure
in_expr = BinOp('in', lhs, rhs)
cmp_expr = UnaryOp(r, 'not', in_expr)
else:
cmp_expr = BinOp(op_token, lhs, rhs)
if result is None:
result = cmp_expr
else:
result = BinOp('and', result, cmp_expr)
return result
@staticmethod
def build_Subscript(ctx, expr):
def build_SliceExpr(ctx, base, slice_expr):
lower = build_expr(ctx, slice_expr.lower) if slice_expr.lower is not None else None
upper = build_expr(ctx, slice_expr.upper) if slice_expr.upper is not None else None
step = build_expr(ctx, slice_expr.step) if slice_expr.step is not None else None
return SliceExpr(base.range(), lower, upper, step)
def build_Index(ctx, base, index_expr):
if isinstance(index_expr.value, ast.Tuple):
raise NotSupportedError(base.range(),
"slicing multiple dimensions with "
"tuples not supported yet")
return build_expr(ctx, index_expr.value)
def build_ExtSlice(ctx, base, extslice):
sub_exprs = []
for expr in extslice.dims:
sub_type = type(expr)
if sub_type is ast.Index:
sub_exprs.append(build_Index(ctx, base, expr))
elif sub_type is ast.Slice:
sub_exprs.append(build_SliceExpr(ctx, base, expr))
elif sub_type is ast.Ellipsis:
sub_exprs.append(Dots(base.range()))
else:
raise NotSupportedError(base.range(),
"slicing multiple dimensions with "
"{} not supported".format(sub_type))
return sub_exprs
base = build_expr(ctx, expr.value)
sub_type = type(expr.slice)
if sub_type is ast.Index:
if isinstance(expr.slice.value, ast.Tuple):
# N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k]
# XXX: Indexing using a list is **different**! It triggers advanced indexing.
indices = [build_expr(ctx, index_expr) for index_expr in expr.slice.value.elts]
if not indices:
# `col_offset` is an int, but `end_col_offset` is
# `Optional[int]`. The magic number is here to make
# sure we can parse `()` on any machine
r = ctx.make_range(expr.lineno,
expr.slice.value.col_offset,
expr.slice.value.col_offset + 2)
tup = TupleLiteral(r, [])
indices.append(tup)
return Subscript(base, indices)
else:
return Subscript(base, [build_expr(ctx, expr.slice.value)])
elif sub_type is ast.Slice:
return Subscript(base, [build_SliceExpr(ctx, base, expr.slice)])
elif sub_type is ast.ExtSlice:
return Subscript(base, build_ExtSlice(ctx, base, expr.slice))
        elif sys.version_info >= (3, 9):  # In Python 3.9 array indices are not wrapped in ast.Index
if sub_type is ast.Tuple:
# N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k]
indices = []
for index_expr in expr.slice.elts:
if isinstance(index_expr, ast.Slice):
indices.append(build_SliceExpr(ctx, base, index_expr))
else:
indices.append(build_expr(ctx, index_expr))
# Special-case logic for `typing.Tuple[()]`
if not indices:
# See note above r.e. magic number
r = ctx.make_range(expr.lineno,
expr.slice.col_offset,
expr.slice.col_offset + 2)
tup = TupleLiteral(r, [])
indices.append(tup)
return Subscript(base, indices)
return Subscript(base, [build_expr(ctx, expr.slice)])
else: # Ellipsis (can only happen in Python 2)
raise NotSupportedError(base.range(), "ellipsis is not supported")
@staticmethod
def build_List(ctx, expr):
return ListLiteral(ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1),
[build_expr(ctx, e) for e in expr.elts])
@staticmethod
def build_Tuple(ctx, expr):
return TupleLiteral(ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1),
[build_expr(ctx, e) for e in expr.elts])
@staticmethod
def build_Dict(ctx, expr):
range = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
if expr.keys and not expr.keys[0]:
raise NotSupportedError(range, "Dict expansion (e.g. `{**dict}`) is not supported")
return DictLiteral(range, [build_expr(ctx, e) for e in expr.keys],
[build_expr(ctx, e) for e in expr.values])
@staticmethod
def build_Num(ctx, expr):
value = str(expr.n)
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(value))
return Const(r, value)
@staticmethod
def build_Constant(ctx, expr):
value = expr.value
if value is None or isinstance(value, bool):
# NB: this check has to happen before the int check because bool is
# a subclass of int
return ExprBuilder.build_NameConstant(ctx, expr)
if isinstance(value, (int, float, complex)):
return ExprBuilder.build_Num(ctx, expr)
elif isinstance(value, str):
return ExprBuilder.build_Str(ctx, expr)
elif isinstance(value, type(Ellipsis)):
return ExprBuilder.build_Ellipsis(ctx, expr)
else:
error_range = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(str(value)))
raise FrontendError(error_range, "Unknown Constant expression type")
@staticmethod
def build_Str(ctx, expr):
value = str(expr.s)
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + len(value) + 1)
return StringLiteral(r, value)
@staticmethod
def build_JoinedStr(ctx, expr):
s = ''
args = []
for value in expr.values:
r = ctx.make_range(value.lineno, value.col_offset, value.col_offset + 1)
if isinstance(value, ast.FormattedValue):
if value.conversion != -1:
raise NotSupportedError(r, 'Don\'t support conversion in JoinedStr')
if value.format_spec is not None:
raise NotSupportedError(r, 'Don\'t support formatting in JoinedStr')
s += '{}'
args.append(build_expr(ctx, value.value))
elif isinstance(value, ast.Str):
s += value.s
else:
raise NotSupportedError(r, 'Unsupported value in JoinedStr')
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
return Apply(Select(StringLiteral(r, s), Ident(r, 'format')), args, [])
@staticmethod
def build_ListComp(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset)
if (len(stmt.generators) != 1):
raise NotSupportedError(r, "Only a single generator is currently supported")
if (len(stmt.generators[0].ifs) != 0):
raise NotSupportedError(r, "Comprehension ifs are not supported yet")
elt_expr = build_expr(ctx, stmt.elt)
target_expr = build_expr(ctx, stmt.generators[0].target)
iter_expr = build_expr(ctx, stmt.generators[0].iter)
return ListComp(r, elt_expr, target_expr, iter_expr)
@staticmethod
def build_GeneratorExp(ctx, stmt):
# Convert Generator expression to ListComp
return ExprBuilder.build_ListComp(ctx, stmt)
@staticmethod
def build_DictComp(ctx, stmt):
r = ctx.make_range(stmt.lineno, stmt.col_offset, stmt.col_offset)
if (len(stmt.generators) != 1):
raise NotSupportedError(r, "Only a single generator is currently supported")
if (len(stmt.generators[0].ifs) != 0):
raise NotSupportedError(r, "Comprehension ifs are not supported yet")
key_expr = build_expr(ctx, stmt.key)
value_expr = build_expr(ctx, stmt.value)
target_expr = build_expr(ctx, stmt.generators[0].target)
iter_expr = build_expr(ctx, stmt.generators[0].iter)
return DictComp(r, key_expr, value_expr, target_expr, iter_expr)
@staticmethod
def build_Starred(ctx, expr):
r = ctx.make_range(expr.lineno, expr.col_offset, expr.col_offset + 1)
return Starred(r, build_expr(ctx, expr.value))
build_expr = ExprBuilder()
build_stmt = StmtBuilder()
build_withitem = WithItemBuilder()
def find_before(ctx, pos, substr, offsets=(0, 0)):
new_pos = ctx.source[:pos].rindex(substr)
return ctx.make_raw_range(new_pos + offsets[0], new_pos + len(substr) + offsets[1])
| pytorch-master | torch/jit/frontend.py |
import torch.jit
from torch.jit._builtins import _find_builtin
import inspect
import textwrap
# this file is for generating documentation using sphinx autodoc
# > help(torch.jit.supported_ops) will also give a nice listing of the
# supported ops programmatically
def _hidden(name):
return name.startswith('_') and not name.startswith('__')
def _emit_type(type):
return str(type)
def _emit_arg(indent, i, arg):
v = "{} : {}".format(arg.name, _emit_type(arg.type))
default = arg.default_value
if default is not None:
v = "{}={}".format(v, str(default))
if i > 0:
v = "\n{}{}".format(" " * indent, v)
return v
def _emit_args(indent, arguments):
return ",".join(_emit_arg(indent, i, arg) for i, arg in enumerate(arguments))
def _emit_ret(ret):
return _emit_type(ret.type)
def _emit_rets(returns):
if len(returns) == 1:
return _emit_ret(returns[0])
return "Tuple[{}]".format(", ".join(_emit_ret(r) for r in returns))
def _emit_schema(mod, name, schema, arg_start=0, padding=4):
if mod is None:
qualified_name = name
else:
qualified_name = "{}.{}".format(mod, name)
schema_str = "{}({}) -> {}".format(qualified_name,
_emit_args(len(qualified_name) + 1 + padding, schema.arguments[arg_start:]),
_emit_rets(schema.returns))
return schema_str
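# Hedged demo (exposition only, not part of the original module): renders the registered
# schema(s) for `aten::relu` the way `_get_tensor_ops` does for Tensor methods; the result
# is expected to look like ["Tensor.relu() -> Tensor"].
def _emit_schema_demo():
    schemas = torch._C._jit_get_schemas_for_operator("aten::relu")
    return [_emit_schema('Tensor', 'relu', s, arg_start=1) for s in schemas]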
def _get_tensor_ops():
def is_tensor_method(schema):
if len(schema.arguments) == 0:
return False
self = schema.arguments[0]
if self.name != 'self':
return False
if not self.type.isSubtypeOf(torch._C.TensorType.get()):
return False
return True
methods = []
# discover methods
for elem in dir(torch.Tensor):
if not _hidden(elem):
schemas = torch._C._jit_get_schemas_for_operator("aten::" + elem)
for schema in schemas:
if is_tensor_method(schema):
methods.append(_emit_schema('Tensor', elem, schema, arg_start=1))
return "Supported Tensor Methods", methods
def _get_nn_functional_ops():
functions = []
# Iterate over torch.nn.functional
mod = torch.nn.functional
name = mod.__name__
for elem in dir(torch.nn.functional):
attr = getattr(mod, elem)
if not inspect.isfunction(attr) or _hidden(elem[0]):
# Ignore non-functions and internal methods
continue
attr_module = inspect.getmodule(attr)
if not attr_module:
raise RuntimeError(f'Module for {attr} not found')
if 'torch.nn.functional' not in attr_module.__name__:
# Ignore functions from outside torch.nn.functional
continue
try:
# compile fn, get schema
scripted = torch.jit.script(attr)
schema = scripted.schema
functions.append(_emit_schema(name, elem, schema))
except: # noqa: B001,E722
# Skip interpolate / boolean dispatched things
pass
# Iterate over modules that we know contain a lot of builtins
for mod in torch.jit._builtins._modules_containing_builtins:
name = mod.__name__
for elem in dir(mod):
builtin = _find_builtin(getattr(mod, elem))
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
for schema in schemas:
# remove _tan but not __and__
if not _hidden(elem):
functions.append(_emit_schema(name, elem, schema))
return "Supported PyTorch Functions", functions
def _get_builtins_helper():
builtins = []
for fn, _builtin_name in torch.jit._builtins._builtin_ops:
mod = inspect.getmodule(fn)
if not hasattr(fn, '__name__'):
# typing classes
continue
if not mod:
continue
if _hidden(fn.__name__) or _hidden(fn.__qualname__) or _hidden(mod.__name__):
# skip internal-only methods
continue
if 'torch._C' in mod.__name__:
continue
builtins.append((fn, _builtin_name))
return builtins
def _is_math_fn(fn):
mod = inspect.getmodule(fn)
if not mod:
raise RuntimeError(f'Module for {fn} not found')
return mod.__name__ == 'math'
def _get_torchscript_builtins():
functions = []
builtins = filter(lambda fn: not _is_math_fn(fn[0]), _get_builtins_helper())
builtins_list = list(builtins)
# Iterate over the specially added builtins
for fn, _builtin_name in builtins_list:
mod = inspect.getmodule(fn)
if not mod:
raise RuntimeError(f'Module for {fn} not found')
builtin = _find_builtin(fn)
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
for schema in schemas:
functions.append(_emit_schema(mod.__name__, fn.__name__, schema))
pass
return "TorchScript Builtin Functions", functions
def _get_math_builtins():
functions = []
builtins = filter(lambda fn: _is_math_fn(fn[0]), _get_builtins_helper())
builtins_list = list(builtins)
# Iterate over the specially added builtins
for fn, _builtin_name in builtins_list:
mod = inspect.getmodule(fn)
if not mod:
raise RuntimeError(f'Module for {fn} not found')
builtin = _find_builtin(fn)
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
for schema in schemas:
schema_str = _emit_schema(mod.__name__, fn.__name__, schema)
if 'Tensor' in schema_str:
# Skip Tensor ops that have the same name as math functions
# (they will show up in the tensor methods section)
continue
                functions.append(schema_str)
pass
return "``math`` Module", functions
def _get_global_builtins():
# Taken from the 'globals' map in torch/csrc/jit/frontend/ir_emitter.cpp
supported_builtins = [
'print',
'tuple',
'float',
'complex',
'int',
'bool',
'str',
'getattr',
'hasattr',
'isinstance',
'len',
'hex',
'oct',
'round',
'hash',
'min',
'max',
'abs',
'all',
'divmod',
'list',
'ord',
'chr',
'bin',
'range',
'zip',
'enumerate',
'sorted',
]
op_renames = {
'bool': 'aten::Bool',
'int': 'aten::Int',
'float': 'aten::Float',
'complex': 'aten::Complex',
'abs': 'prim::abs',
'max': 'prim::max',
'min': 'prim::min',
'range': 'fake::does_not_exist',
}
schemaless_op_explanations = {
'print': 'Print any value',
'tuple': 'Lists cannot be converted to tuples with this method since their size is not statically known',
'getattr': 'Attribute name must be a literal string',
'hasattr': 'Attribute name must be a literal string',
'isinstance': 'Result is static',
'zip': 'Arguments must be iterable. See :ref:`Iterables <jit_iterables>` for details.',
'enumerate': 'Arguments must be iterable. See :ref:`Iterables <jit_iterables>` for details.',
'range': 'Can only be used as an iterator in a for loop',
}
magic_methods = [
('complex', '__complex__'),
('float', '__float__'),
('int', '__int__'),
('bool', '__bool__'),
('str', '__str__'),
('len', '__len__'),
('hex', '__hex__'),
('oct', '__oct__'),
]
magic_methods_rows = []
for fn, magic_method in magic_methods:
magic_methods_rows.append('"{}", "``{}``"'.format(fn, magic_method))
schematized_ops = []
schemaless_ops = []
for fn in supported_builtins:
op_name = 'aten::{}'.format(fn)
if fn in op_renames:
op_name = op_renames[fn]
schemas = torch._C._jit_get_schemas_for_operator(op_name)
for s in schemas:
schematized_ops.append(_emit_schema(None, fn, s, padding=0))
if len(schemas) > 0:
schematized_ops.append('')
else:
table_row = '":any:`{}`", "{}"'.format(fn, schemaless_op_explanations[fn])
schemaless_ops.append(table_row)
schematized_ops_str = '\n'.join(schematized_ops)
schemaless_ops_str = '\n'.join(schemaless_ops)
magic_methods_rows_str = '\n'.join(magic_methods_rows)
schematized_ops_str = textwrap.indent(schematized_ops_str, '\t')
schemaless_ops_str = textwrap.indent(schemaless_ops_str, '\t')
magic_methods_rows_str = textwrap.indent(magic_methods_rows_str, '\t')
section = """
The functions in the following table are supported but do not have a static schema
.. csv-table::
:header: "Function", "Note"
{}
The following functions will use the corresponding magic method on :any:`TorchScript classes`
.. csv-table::
:header: "Function", "Magic Method"
{}
These built-in functions use the schema
.. rst-class:: codeblock-height-limiter
::
{}
""".format(schemaless_ops_str, magic_methods_rows_str, schematized_ops_str)
return "Python Built-in Functions", section
def _list_supported_ops():
def emit_block(decls):
return '\n.. rst-class:: codeblock-height-limiter\n\n::\n\n{}\n'.format(''.join(' {}\n\n'.format(d) for d in decls))
body = ''
op_gathering_fns = (
_get_tensor_ops,
_get_nn_functional_ops,
_get_torchscript_builtins,
_get_global_builtins,
_get_math_builtins,
)
for fn in op_gathering_fns:
header, items = fn()
link_target = header.replace('`', '').replace('-', '').lower().replace(' ', '-')
if isinstance(items, str):
section = "{}\n{}\n{}\n".format(header, '~' * len(header), items)
else:
section = "{}\n{}\n{}".format(header, '~' * len(header), emit_block(items))
section = '.. _{}:'.format(link_target) + '\n\n' + section
body += section
return body
__doc__ = _list_supported_ops()
| pytorch-master | torch/jit/supported_ops.py |
import torch._C
from contextlib import contextmanager
from typing import Iterator, Any
import warnings
from torch.utils import set_module
# These are imported so users can access them from the `torch.jit` module
from torch._jit_internal import (
Final,
Future,
_IgnoreContextManager,
_overload,
_overload_method,
ignore,
_isinstance,
is_scripting,
export,
unused,
)
from torch.jit._script import (
script,
Attribute,
ScriptModule,
script_method,
RecursiveScriptClass,
RecursiveScriptModule,
ScriptWarning,
interface,
CompilationUnit,
ScriptFunction,
_ScriptProfile,
_unwrap_optional,
)
from torch.jit._trace import (
trace,
trace_module,
TracedModule,
TracerWarning,
TracingCheckError,
is_tracing,
ONNXTracedModule,
TopLevelTracedModule,
_unique_state_dict,
_flatten,
_script_if_tracing,
_get_trace_graph,
)
from torch.jit._async import fork, wait
from torch.jit._decomposition_utils import _register_decomposition
from torch.jit._serialization import (
save,
load,
jit_module_from_flatbuffer,
save_jit_module_to_flatbuffer,
)
from torch.jit._fuser import optimized_execution, fuser, last_executed_optimized_graph, set_fusion_strategy
from torch.jit._freeze import freeze, optimize_for_inference, run_frozen_optimizations
from torch.jit._ir_utils import _InsertPoint
# For backwards compatibility
_fork = fork
_wait = wait
_set_fusion_strategy = set_fusion_strategy
def export_opnames(m):
r"""
    Generates new bytecode for a Script module and returns what the op list
    would be for a Script Module based on the current code base. If you
    have a LiteScriptModule and want the list of ops currently present,
    call _export_operator_list instead.
"""
return torch._C._export_opnames(m._c)
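# Hedged usage sketch (illustration only; `MyModule` is assumed and not defined here):
#     scripted = torch.jit.script(MyModule())
#     mobile_ops = torch.jit.export_opnames(scripted)  # e.g. ["aten::add.Tensor", ...]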
# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")
# This is not perfect but works in common cases
Error.__name__ = "Error"
Error.__qualname__ = "Error"
# for use in python if using annotate
def annotate(the_type, the_value):
"""
    This method is a pass-through function that returns `the_value`, used to hint the type of
    `the_value` to the TorchScript compiler. It is a no-op when running outside of TorchScript.
Though TorchScript can infer correct type for most Python expressions, there are some cases where
type inference can be wrong, including:
- Empty containers like `[]` and `{}`, which TorchScript assumes to be container of `Tensor`
    - Optional types like `Optional[T]` that are assigned a valid value of type `T`, which TorchScript
      would assume to be of type `T` rather than `Optional[T]`
Note that `annotate()` does not help in `__init__` method of `torch.nn.Module` subclasses because it
is executed in eager mode. To annotate types of `torch.nn.Module` attributes,
    use :meth:`~torch.jit.Attribute` instead.
Example:
.. testcode::
import torch
from typing import Dict
@torch.jit.script
def fn():
# Telling TorchScript that this empty dictionary is a (str -> int) dictionary
# instead of default dictionary type of (str -> Tensor).
d = torch.jit.annotate(Dict[str, int], {})
# Without `torch.jit.annotate` above, following statement would fail because of
# type mismatch.
d["name"] = 20
.. testcleanup::
del fn
Args:
the_type: Python type that should be passed to TorchScript compiler as type hint for `the_value`
the_value: Value or expression to hint type for.
Returns:
`the_value` is passed back as return value.
"""
return the_value
def script_if_tracing(fn):
"""
Compiles ``fn`` when it is first called during tracing. ``torch.jit.script``
has a non-negligible start up time when it is first called due to
lazy-initializations of many compiler builtins. Therefore you should not use
it in library code. However, you may want to have parts of your library work
in tracing even if they use control flow. In these cases, you should use
``@torch.jit.script_if_tracing`` to substitute for
``torch.jit.script``.
Args:
fn: A function to compile.
Returns:
If called during tracing, a :class:`ScriptFunction` created by `torch.jit.script` is returned.
Otherwise, the original function `fn` is returned.
"""
return _script_if_tracing(fn)
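# Hedged usage sketch (exposition only; `_pad_last_dim_to_even` is an assumed helper, not
# part of this module): decorate library helpers that need control flow so they are only
# compiled when tracing actually reaches them.
#
#     @torch.jit.script_if_tracing
#     def _pad_last_dim_to_even(x: torch.Tensor) -> torch.Tensor:
#         if x.shape[-1] % 2 != 0:
#             x = torch.nn.functional.pad(x, [0, 1])
#         return x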
# for torch.jit.isinstance
def isinstance(obj, target_type):
"""
This function provides for container type refinement in TorchScript. It can refine
parameterized containers of the List, Dict, Tuple, and Optional types. E.g. ``List[str]``,
``Dict[str, List[torch.Tensor]]``, ``Optional[Tuple[int,str,int]]``. It can also
refine basic types such as bools and ints that are available in TorchScript.
Args:
obj: object to refine the type of
target_type: type to try to refine obj to
Returns:
``bool``: True if obj was successfully refined to the type of target_type,
False otherwise with no new type refinement
Example (using ``torch.jit.isinstance`` for type refinement):
.. testcode::
import torch
from typing import Any, Dict, List
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, input: Any): # note the Any type
if torch.jit.isinstance(input, List[torch.Tensor]):
for t in input:
y = t.clamp(0, 0.5)
elif torch.jit.isinstance(input, Dict[str, str]):
for val in input.values():
print(val)
m = torch.jit.script(MyModule())
x = [torch.rand(3,3), torch.rand(4,3)]
m(x)
y = {"key1":"val1","key2":"val2"}
m(y)
"""
return _isinstance(obj, target_type)
class strict_fusion(object):
"""
This class errors if not all nodes have been fused in
inference, or symbolically differentiated in training.
Example:
Forcing fusion of additions.
.. code-block:: python
@torch.jit.script
def foo(x):
with torch.jit.strict_fusion():
return x + x + x
"""
def __init__(self):
if not torch._jit_internal.is_scripting():
warnings.warn("Only works in script mode")
pass
def __enter__(self):
pass
def __exit__(self, type: Any, value: Any, tb: Any) -> None:
pass
# Context manager for globally hiding source ranges when printing graphs.
# Note that these functions are exposed to Python as static members of the
# Graph class, so mypy checks need to be skipped.
@contextmanager
def _hide_source_ranges() -> Iterator[None]:
old_enable_source_ranges = torch._C.Graph.global_print_source_ranges # type: ignore[attr-defined]
try:
torch._C.Graph.set_global_print_source_ranges(False) # type: ignore[attr-defined]
yield
finally:
torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges) # type: ignore[attr-defined]
def enable_onednn_fusion(enabled: bool):
"""
Enables or disables onednn JIT fusion based on the parameter `enabled`.
"""
torch._C._jit_set_llga_enabled(enabled)
def onednn_fusion_enabled():
"""
Returns whether onednn JIT fusion is enabled
"""
return torch._C._jit_llga_enabled()
del Any
if not torch._C._jit_init():
raise RuntimeError("JIT initialization failed")
| pytorch-master | torch/jit/__init__.py |
import contextlib
import torch
from typing import List, Tuple
@contextlib.contextmanager
def optimized_execution(should_optimize):
"""
A context manager that controls whether the JIT's executor will run
optimizations before executing a function.
"""
stored_flag = torch._C._get_graph_executor_optimize()
torch._C._set_graph_executor_optimize(should_optimize)
try:
yield
finally:
torch._C._set_graph_executor_optimize(stored_flag)
@contextlib.contextmanager
def fuser(name):
"""
A context manager that facilitates switching between
backend fusers.
Valid names:
* ``fuser0`` - enables only legacy fuser
* ``fuser1`` - enables only NNC
* ``fuser2`` - enables only nvFuser
"""
old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
old_nvfuser_state = torch._C._jit_nvfuser_enabled()
if name == 'fuser0': # legacy fuser
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
elif name == 'fuser1': # NNC
old_profiling_executor = torch._C._jit_set_profiling_executor(True)
old_profiling_mode = torch._C._get_graph_executor_optimize(True)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
elif name == 'fuser2': # nvFuser
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
elif name == 'none': # Turn Pytorch fuser off
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
else:
raise Exception(f"unrecognized fuser option (name: {name})")
try:
yield
finally:
if name == 'fuser1': # NNC
torch._C._jit_set_profiling_executor(old_profiling_executor)
torch._C._get_graph_executor_optimize(old_profiling_mode)
# recover the previous values
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
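# Hedged usage sketch (illustration only; `_fuser_demo` is not part of the original module):
# run a scripted function with only the NNC fuser enabled, restoring the previous fuser
# state on exit.
def _fuser_demo():
    @torch.jit.script
    def _add3(x):
        return x + x + x
    with fuser('fuser1'):
        return _add3(torch.ones(4))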
last_executed_optimized_graph = torch._C._last_executed_optimized_graph
def _get_differentiable_graph_node(node, diff_node):
if node.kind() == 'prim::DifferentiableGraph':
diff_node.append(node)
else:
for block in node.blocks():
for n in block.nodes():
_get_differentiable_graph_node(n, diff_node)
def _graph_for(self, *args, **kwargs):
return _script_method_graph_for(self, self, *args, **kwargs)
def _script_method_graph_for(self, parent, *args, **kwargs):
try:
dbs = parent.get_debug_state()
eps = list(dbs.execution_plans.values())
assert(len(eps) == 1)
graph = eps[0].graph.copy()
# graph_executor_states for differentiable node
fw_states = eps[0].code.differentiable_op_executor_states()
diff_nodes: List[torch._C.Node] = []
for n in graph.nodes():
_get_differentiable_graph_node(n, diff_nodes)
assert(len(fw_states) == len(diff_nodes))
# swap each differentiable graph with optimized graph in their execution plan
for n, state in zip(diff_nodes, fw_states):
fw_execution_plans = list(state.execution_plans.values())
            # we can only update the subgraph when there's a unique execution
            # plan. Avoid asserting here so we skip the nodes that can't be
            # updated while still making a best effort to update the others.
if len(fw_execution_plans) == 1:
n.g_('Subgraph', fw_execution_plans[0].graph)
return graph
except Exception:
# fallback approach, we just ran the graph and return the recorded optimized
# graph
self(*args, **kwargs)
return last_executed_optimized_graph()
def set_fusion_strategy(strategy: List[Tuple[str, int]]):
"""
Sets the type and number of specializations that can occur during fusion.
Usage: provide a list of pairs (type, depth) where type is one of "STATIC" or "DYNAMIC"
and depth is an integer.
Behavior - static vs dynamic:
In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
based on some initial profiling runs.
In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
shapes are possible.
In both cases, we also recompile on new striding behavior, device, or dtype.
Behavior - fallback functions & depth:
When an input doesn't match the format required by the specialized compiled op, it will run
        a fallback function. Fallback functions are recursively compiled and specialized based
on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
limit the number of specializations that can be compiled, before giving up on recompiling and
falling back to a completely un-fused, un-specialized implementation.
The list of (type, depth) pairs controls the type of specializations and the number of
specializations. For example: [("STATIC", 2), ("DYNAMIC", 2)] indicates that the first
two specializations will use static fusions, the following two specializations will use
dynamic fusion, and any inputs that satisfy none of the 4 options will run an
unfused implementation.
    NB: in the future, as more fusion backends are added, there may be more granular
    APIs for specific fusers.
"""
return torch._C._jit_set_fusion_strategy(strategy)
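# Hedged usage sketch (exposition only): allow two statically-shaped specializations and
# then two dynamically-shaped ones before falling back to an unfused implementation.
#     torch.jit.set_fusion_strategy([("STATIC", 2), ("DYNAMIC", 2)])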
| pytorch-master | torch/jit/_fuser.py |
import math
import cmath
import warnings
import torch
import torch.backends.cudnn as cudnn
from ..nn.modules.utils import _single, _pair, _triple, _quadruple, _list_with_default
from collections import OrderedDict
from typing import Dict, Optional
_builtin_table: Optional[Dict[int, str]] = None
_modules_containing_builtins = (torch, torch._C._nn, torch._C._fft, torch._C._linalg, torch._C._sparse, torch._C._special) # type: ignore[attr-defined] # noqa: B950
_builtin_ops = [
# Pairs of (function, op_name)
(_pair, "aten::_pair"),
(_quadruple, "aten::_quadruple"),
(_single, "aten::_single"),
(_triple, "aten::_triple"),
(_list_with_default, "aten::list_with_default"),
(OrderedDict, "aten::dict"),
(dict, "aten::dict"),
(cudnn.is_acceptable, "aten::cudnn_is_acceptable"),
(math.ceil, "aten::ceil"),
(math.copysign, "aten::copysign"),
(math.erf, "aten::erf"),
(math.erfc, "aten::erfc"),
(math.exp, "aten::exp"),
(math.expm1, "aten::expm1"),
(math.fabs, "aten::fabs"),
(math.floor, "aten::floor"),
(math.gamma, "aten::gamma"),
(math.lgamma, "aten::lgamma"),
(math.log, "aten::log"),
(math.log10, "aten::log10"),
(math.log1p, "aten::log1p"),
(math.pow, "aten::pow"),
(math.sqrt, "aten::sqrt"),
(math.isnan, "aten::isnan"),
(math.asinh, "aten::asinh"),
(math.atanh, "aten::atanh"),
(math.cosh, "aten::cosh"),
(math.sinh, "aten::sinh"),
(math.tanh, "aten::tanh"),
(math.acos, "aten::acos"),
(math.asin, "aten::asin"),
(math.atan, "aten::atan"),
(math.atan2, "aten::atan2"),
(math.cos, "aten::cos"),
(math.sin, "aten::sin"),
(math.tan, "aten::tan"),
(math.asinh, "aten::asinh"),
(math.atanh, "aten::atanh"),
(math.acosh, "aten::acosh"),
(math.fmod, "aten::fmod"),
(math.modf, "aten::modf"),
(math.factorial, "aten::factorial"),
(math.frexp, "aten::frexp"),
(math.isinf, "aten::isinf"),
(math.degrees, "aten::degrees"),
(math.radians, "aten::radians"),
(cmath.isnan, "aten::isnan"),
(cmath.isfinite, "aten::isfinite"),
(cmath.isinf, "aten::isinf"),
(cmath.phase, "aten::angle"),
(cmath.rect, "aten::polar"),
(cmath.log, "aten::log"),
(cmath.log10, "aten::log10"),
(cmath.sqrt, "aten::sqrt"),
(cmath.exp, "aten::exp"),
(cmath.sin, "aten::sin"),
(cmath.tan, "aten::tan"),
(cmath.cos, "aten::cos"),
(cmath.asin, "aten::asin"),
(cmath.acos, "aten::acos"),
(cmath.atan, "aten::atan"),
(cmath.sinh, "aten::sinh"),
(cmath.cosh, "aten::cosh"),
(cmath.tanh, "aten::tanh"),
(cmath.asinh, "aten::asinh"),
(cmath.acosh, "aten::acosh"),
(cmath.atanh, "aten::atanh"),
(math.ldexp, "aten::ldexp"),
(torch._assert, "aten::_assert"),
(torch.autograd.grad, "aten::grad"),
(torch.autograd.backward, "aten::backward"),
(torch._C._infer_size, "aten::_infer_size"),
(torch.nn.functional._no_grad_embedding_renorm_, "aten::_no_grad_embedding_renorm_"), # type: ignore[attr-defined]
(torch.nn.functional.assert_int_or_pair, "aten::_assert_int_or_pair"),
(torch.nn.init._no_grad_fill_, "aten::_no_grad_fill_"),
(torch.nn.init._no_grad_normal_, "aten::_no_grad_normal_"),
(torch.nn.init._no_grad_uniform_, "aten::_no_grad_uniform_"),
(torch.nn.init._no_grad_zero_, "aten::_no_grad_zero_"),
(torch._C._get_tracing_state, "aten::_get_tracing_state"),
(warnings.warn, "aten::warn"),
(torch._VF.stft, "aten::stft"), # type: ignore[attr-defined]
(torch._VF.istft, "aten::istft"), # type: ignore[attr-defined]
(torch._VF.cdist, "aten::cdist"), # type: ignore[attr-defined]
(torch._VF.norm, "aten::norm"), # type: ignore[attr-defined]
(torch._VF.unique_dim, "aten::unique_dim"),
(torch._VF.unique_consecutive, "aten::unique_consecutive"), # type: ignore[attr-defined]
(torch._VF.nuclear_norm, "aten::nuclear_norm"),
(torch._VF.frobenius_norm, "aten::frobenius_norm"),
(torch._VF.tensordot, "aten::tensordot"), # type: ignore[attr-defined]
]
# ops in torch.functional are bound to torch
# in these cases, we want to resolve the functions to their Python implementations
# instead of looking up a builtin "aten::" schema
def _gen_torch_functional_registered_ops():
# eventually ops should encompass all of torch/functional.py, (torch.functional.__all__)
# but we are currently only able to compile some of the functions. additionally,
# some functions directly map to their aten:: implementations.
# TODO: add support for more ops
ops = ["stft", "istft", "lu", "cdist", "norm", "unique", "unique_consecutive", "tensordot"]
return set(getattr(torch.functional, name) for name in ops)
_functional_registered_ops = _gen_torch_functional_registered_ops()
def _is_special_functional_bound_op(fn):
return fn in _functional_registered_ops
# lazily built to ensure the correct initialization order
def _get_builtin_table():
global _builtin_table
if _builtin_table is not None:
return _builtin_table
_builtin_table = {}
def register_all(mod):
for name in dir(mod):
v = getattr(mod, name)
if callable(v) and not _is_special_functional_bound_op(v) and v is not torch.no_grad and v is not torch.autocast:
_builtin_ops.append((v, "aten::" + name))
for mod in _modules_containing_builtins:
register_all(mod)
_builtin_ops.append((math.gcd, "aten::gcd"))
_builtin_ops.append((math.isfinite, "aten::isfinite"))
_builtin_ops.append((math.remainder, "aten::mathremainder")) # type: ignore[attr-defined]
import torch.distributed.autograd as dist_autograd
if dist_autograd.is_available():
_builtin_ops.append((dist_autograd.get_gradients, "aten::get_gradients"))
_builtin_ops.append((dist_autograd.backward, "aten::dist_backward"))
# populate the _builtin_table from _builtin_ops
for builtin, aten_op in _builtin_ops:
_builtin_table[id(builtin)] = aten_op
return _builtin_table
def _register_builtin(fn, op):
_get_builtin_table()[id(fn)] = op
def _find_builtin(fn):
return _get_builtin_table().get(id(fn))
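# --- Illustrative usage sketch (not part of the original file) ---
# A minimal sketch of how the table above is consumed, assuming it has been
# populated by _get_builtin_table(); `my_wait` below is a hypothetical helper.
#
#     import math
#
#     # math.gcd is explicitly registered above, so the lookup resolves to the
#     # corresponding aten schema name:
#     assert _find_builtin(math.gcd) == "aten::gcd"
#
#     # A custom callable can be mapped onto an existing schema so the compiler
#     # treats calls to it as calls to that builtin:
#     def my_wait(fut):
#         return fut.wait()
#     _register_builtin(my_wait, "aten::wait")
#     assert _find_builtin(my_wait) == "aten::wait"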
| pytorch-master | torch/jit/_builtins.py |
# Functions for synthesizing magic methods for JIT-compiled dataclasses
import os
from functools import partial
from torch._jit_internal import is_optional, FAKE_FILENAME_PREFIX
from torch._sources import ParsedDef, SourceContext
from typing import Callable, Dict, List
import ast
import dataclasses
import inspect
import sys
def _get_fake_filename(cls, method_name):
return os.path.join(FAKE_FILENAME_PREFIX, cls.__name__, method_name)
def compose_fn(cls, name: str, body_lines: List[str], signature: str) -> ParsedDef:
body = '\n'.join(f' {b}' for b in body_lines)
decl = f'def {name}{signature}:\n{body}'
# Parse the function declaration
try:
py_ast = ast.parse(decl)
except SyntaxError:
# This should only happen if there's some unforeseeable change
# in the dataclasses module that makes our synthesized code fail
raise RuntimeError(
f"TorchScript failed to synthesize dataclass method '{name}' for class '{cls.__name__}'. "
"Please file a bug report at <https://github.com/pytorch/pytorch/issues>"
)
fake_filename = _get_fake_filename(cls, name)
# Parse the function
return ParsedDef(
py_ast,
ctx=SourceContext(
source=decl,
filename=fake_filename,
file_lineno=0,
leading_whitespace_len=0
),
source=decl,
filename=fake_filename,
file_lineno=0
)
def synthesize__init__(cls) -> ParsedDef:
    # Supporting default factories in the way that people expect would effectively require us to
    # allow compiling lambda functions, which is not currently supported.
if any(field.default_factory is not dataclasses.MISSING for field in dataclasses.fields(cls)):
raise NotImplementedError("Default factory initializers are not supported in TorchScript dataclasses")
# Simply read off the generated __init__ signature from CPython's implementation. It'll be
# almost correct except for InitVar annotations, which we need to handle specially.
signature = inspect.signature(cls.__init__)
# Handle InitVars if needed (only works on Python 3.8+, when a `type` attribute was added to InitVar);
# see CPython commit here https://github.com/python/cpython/commit/01ee12ba35a333e8a6a25c4153c4a21838e9585c
init_vars: List[str] = []
if sys.version_info >= (3, 8):
params = []
for name, param in signature.parameters.items():
ann = param.annotation
if isinstance(ann, dataclasses.InitVar):
# The TorchScript interpreter can't handle InitVar annotations, so we unwrap the underlying type here
init_vars.append(name)
params.append(param.replace(annotation=ann.type)) # type: ignore[attr-defined]
else:
params.append(param)
signature = signature.replace(parameters=params)
body = [
# Assign all attributes to self
f'self.{field.name} = {field.name}'
for field in dataclasses.fields(cls)
if field.init and field.name not in init_vars
]
# Call user's impl of __post_init__ if it exists
if hasattr(cls, '__post_init__'):
body.append('self.__post_init__(' + ', '.join(init_vars) + ')')
return compose_fn(cls, '__init__', body or ['pass'], signature=str(signature))
# This is a placeholder at the moment since the TorchScript interpreter doesn't call __repr__
def synthesize__repr__(cls) -> ParsedDef:
return compose_fn(
cls, '__repr__',
[f"return '{cls.__name__}(" + ", ".join([
f"{field.name}=self.{field.name}"
for field in dataclasses.fields(cls) if field.repr
]) + ")'"],
signature='(self) -> str'
)
def synthesize__hash__(cls) -> ParsedDef:
return compose_fn(
cls, '__hash__',
[
# This is just a placeholder to prevent compilation from failing; this won't even get called at
# all right now because the TorchScript interpreter doesn't call custom __hash__ implementations
"raise NotImplementedError('__hash__ is not supported for dataclasses in TorchScript')"
],
signature='(self) -> int'
)
# Implementation for __eq__ and __ne__
def synthesize_equality(cls, name: str, converse: str) -> ParsedDef:
return synthesize_comparison(cls, name, allow_eq=True, raise_on_none=False, inner=[
f"if val1 {converse} val2: return False"
])
def synthesize_inequality(cls, name: str, op: str, allow_eq: bool) -> ParsedDef:
return synthesize_comparison(cls, name, allow_eq, raise_on_none=True, inner=[
f"if val1 {op} val2: return True",
f"elif val2 {op} val1: return False",
])
def synthesize_comparison(cls, name: str, allow_eq: bool, raise_on_none: bool, inner: List[str]) -> ParsedDef:
body = []
for field in dataclasses.fields(cls):
if not field.compare:
continue
body.extend([
f"val1 = self.{field.name}",
f"val2 = other.{field.name}",
])
body.extend(
inner if not is_optional(field.type) else [
# Type refinement for optional fields; we need this to avoid type errors from the interpreter
"if val1 is not None and val2 is not None:",
*[' ' + line for line in inner],
"elif (val1 is None) != (val2 is None):",
f" raise TypeError('Cannot compare {cls.__name__} with None')" if raise_on_none else " return False"
]
)
body.append(f"return {allow_eq}")
return compose_fn(cls, name, body, signature=f'(self, other: {cls.__name__}) -> bool')
DATACLASS_MAGIC_METHODS: Dict[str, Callable] = {
"__init__": synthesize__init__,
"__repr__": synthesize__repr__,
"__hash__": synthesize__hash__,
"__eq__": partial(synthesize_equality, name="__eq__", converse="!="),
"__ne__": partial(synthesize_equality, name="__ne__", converse="=="),
"__lt__": partial(synthesize_inequality, name="__lt__", op="<", allow_eq=False),
"__le__": partial(synthesize_inequality, name="__le__", op="<", allow_eq=True),
"__gt__": partial(synthesize_inequality, name="__gt__", op=">", allow_eq=False),
"__ge__": partial(synthesize_inequality, name="__ge__", op=">", allow_eq=True),
}
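# --- Illustrative sketch (not part of the original file) ---
# A rough sketch of what the synthesizers above produce for a simple
# dataclass; `Point` is a hypothetical example class.
#
#     @dataclasses.dataclass
#     class Point:
#         x: int
#         y: int = 0
#
#     # synthesize__init__(Point) composes approximately this declaration,
#     # which compose_fn then parses into a ParsedDef for the compiler:
#     #
#     #     def __init__(self, x: int, y: int = 0) -> None:
#     #         self.x = x
#     #         self.y = y
#
#     # synthesize_equality(Point, name="__eq__", converse="!=") emits a body
#     # that compares each field pair and returns True only if none differ.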
| pytorch-master | torch/jit/_dataclass_impls.py |
"""Tracing
This module contains functionality to support the JIT's tracing frontend, notably:
* torch.jit.trace
* torch.jit.trace_module
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import torch
import copy
import os
import contextlib
import functools
import warnings
import inspect
import re
from typing import Any, Dict, List, Optional, Set
from torch.jit._state import _python_cu, _enabled
from torch.jit._script import ScriptModule, _CachedForward, script
from torch._jit_internal import _qualified_name, is_scripting, get_callable_argument_names
from torch.autograd import function
from torch.nn import Module
from torch.testing._comparison import default_tolerances
_flatten = torch._C._jit_flatten
_unflatten = torch._C._jit_unflatten
def _create_interpreter_name_lookup_fn(frames_up=1):
def _get_interpreter_name_for_var(var):
frame = inspect.currentframe()
if not frame:
raise RuntimeError("failed to inspect frame")
i = 0
while i < frames_up + 1:
frame = frame.f_back
if not frame:
raise RuntimeError("failed to get frame")
i += 1
f_locals = frame.f_locals
f_globals = frame.f_globals
for k, v in f_locals.items():
if isinstance(v, torch.Tensor) and var is v:
return k if k != "self" else ""
return ""
return _get_interpreter_name_for_var
def _unique_state_dict(module, keep_vars=False):
# since Parameter.detach() always creates a new torch.Tensor instance,
# id(v) doesn't work with it. So we always get the Parameter or Buffer
# as values, and deduplicate the params using Parameters and Buffers
state_dict = module.state_dict(keep_vars=True)
filtered_dict = type(state_dict)()
seen_ids: Set[int] = set()
for k, v in state_dict.items():
if id(v) in seen_ids:
continue
seen_ids.add(id(v))
if keep_vars:
filtered_dict[k] = v
else:
filtered_dict[k] = v.detach()
return filtered_dict
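# --- Illustrative sketch (not part of the original file) ---
# A minimal sketch of the deduplication behaviour above, assuming a module
# that registers the same submodule twice (`holder`/`shared` are hypothetical):
#
#     shared = torch.nn.Linear(2, 2, bias=False)
#     holder = torch.nn.Module()
#     holder.a = shared
#     holder.b = shared  # the same parameters are reachable under two prefixes
#
#     # state_dict() reports both "a.weight" and "b.weight", but they point to
#     # the same Parameter object, so only the first occurrence is kept:
#     assert list(_unique_state_dict(holder)) == ["a.weight"]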
class ONNXTracedModule(torch.nn.Module):
def __init__(
self,
inner,
strict=True,
force_outplace=False,
return_inputs=False,
return_inputs_states=False,
):
super(ONNXTracedModule, self).__init__()
# inner may be a Module, or it may be an arbitrary callable
# If it's a Module, we get its parameters automatically, which lets
        # us avoid special-casing functions versus modules.
self.inner = inner
self.strict = strict
self._force_outplace = force_outplace
self._return_inputs = return_inputs
self._return_inputs_states = return_inputs_states
def forward(self, *args: torch.Tensor):
in_vars, in_desc = _flatten(args)
# NOTE: use full state, because we need it for BatchNorm export
# This differs from the compiler path, which doesn't support it at the moment.
module_state = list(_unique_state_dict(self, keep_vars=True).values())
ret_inputs = []
inputs_states = []
outs = []
def wrapper(*args):
in_args: List[torch.Tensor] = []
for i in range(len(in_vars)):
if not isinstance(args[i], torch.Tensor):
raise RuntimeError('Expected Tensor argument')
in_args.append(args[i])
trace_inputs = _unflatten(in_args, in_desc)
ret_inputs.append(
tuple(x.clone(memory_format=torch.preserve_format) for x in args)
)
if self._return_inputs_states:
inputs_states.append(_unflatten(in_args, in_desc))
outs.append(self.inner(*trace_inputs))
if self._return_inputs_states:
inputs_states[0] = (inputs_states[0], trace_inputs)
out_vars, _ = _flatten(outs)
if len(out_vars) == 1:
return out_vars[0]
else:
return tuple(out_vars)
graph, out = torch._C._create_graph_by_tracing(
wrapper,
in_vars + module_state,
_create_interpreter_name_lookup_fn(),
self.strict,
self._force_outplace,
)
if self._return_inputs:
return graph, outs[0], ret_inputs[0]
if self._return_inputs_states:
return graph, outs[0], inputs_states[0]
else:
return graph, outs[0]
def _clone_inputs(args):
def clone_input(a):
if a is None:
return None
elif isinstance(a, torch.Tensor):
# TODO: figure out one liner to .clone() and set requires_grad
v = (
a.detach()
.clone(memory_format=None if a.is_mkldnn else torch.preserve_format)
.requires_grad_(a.requires_grad)
)
if a.grad is not None:
v.grad = clone_input(v.grad)
return v
else:
return a.clone(memory_format=torch.preserve_format)
return function._nested_map(
lambda x: isinstance(x, torch.Tensor), clone_input, condition_msg="tensors"
)(args)
# This is purely for developer debugging. We are not going to advertise it.
_JIT_TIME = os.environ.get("PYTORCH_JIT_TIME", False) # CUDA-only timing
_JIT_DISABLE = os.environ.get("PYTORCH_JIT_DISABLE", False)
_JIT_STATS = os.environ.get("PYTORCH_JIT_STATS", False)
@contextlib.contextmanager
def _time(trace_name, name, time=True):
if (not _JIT_TIME and not time) or not torch.cuda.is_available():
yield
return
stream = torch.cuda.current_stream()
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
stream.record_event(start)
try:
yield
finally:
stream.record_event(end)
end.synchronize()
print("{} {} time: {} ms".format(trace_name, name, start.elapsed_time(end)))
def verify(model, args, loss_fn=torch.sum, devices=None):
"""
Verify that a JIT compiled model has the same behavior as its uncompiled
version along with its backwards pass. If your model returns multiple
outputs, you must also specify a `loss_fn` to produce a loss for which
the backwards will be computed.
This function has side-effects (e.g., it executes your model / saves and loads
parameters), so don't expect the model to come out exactly the same as what
you passed in.
Args:
model (compiled torch.nn.Module or function): the module/function to be
verified. The module/function definition MUST have been decorated with
`@torch.jit.compile`.
args (tuple or Tensor): the positional arguments to pass to the
compiled function/module to be verified. A non-tuple is assumed to
be a single positional argument to be passed to the model.
loss_fn (function, optional): the loss function to be applied to
the output of the model, before backwards is invoked. By default,
            we assume that a model returns a single result, and we apply :func:`torch.sum`
            to it before calling backwards; if this is inappropriate, you can pass your
own loss function. Note that if a model returns a tuple of results,
these are passed as separate positional arguments to `loss_fn`.
devices (iterable of device IDs, optional): the GPU devices which the
compiled module will be run on. This determines the RNG state we
must save when running both compiled and uncompiled versions of the model.
"""
# TODO: In principle, we track device information in our trace, so it
# should be possible to check if our execution actually obeyed the 'devices'
# the user provided.
# TODO: Consider adding a utility function to torch.jit to test
# for this case
if not isinstance(model, torch._C.CompiledFunction): # type: ignore[attr-defined]
raise TypeError(
"Cannot verify an uncompiled module. Add @torch.jit.compile to compile it"
)
is_module = isinstance(model, Module)
if not isinstance(args, tuple):
args = (args,)
saved_args = _clone_inputs(args)
if is_module:
saved_state = copy.deepcopy(model.state_dict())
def run_fwd_bwd(args, force_trace=False, assert_compiled=False):
params = list(model.parameters()) if is_module else []
in_vars, _ = _flatten((args, params))
# We use a special API to reset the trace and compile it from scratch.
compiled_fn = model
if force_trace:
compiled_fn.clear_cache()
if assert_compiled:
hits = compiled_fn.hits
out = model(*args)
if assert_compiled and compiled_fn.hits == hits:
raise RuntimeError("failed to use the compiled function")
if not isinstance(out, tuple):
out = (out,)
if loss_fn == torch.sum and len(out) != 1:
raise ValueError(
(
"Model returns {} outputs, but default loss function "
"(torch.sum) can only handle a single output"
).format(len(out))
)
out_vars, _ = _flatten(out)
saved_outs = [
v.detach().clone(memory_format=torch.preserve_format) for v in out_vars
]
loss = loss_fn(*out)
grads = torch.autograd.grad([loss], in_vars)
# TODO: I'm not sure if the clone here is necessary but it is safer
saved_grads = [
v.detach().clone(memory_format=torch.preserve_format) for v in grads
]
return (saved_outs, saved_grads)
with torch.random.fork_rng(devices, _caller="torch.jit.verify"):
uncompiled_outs, uncompiled_grads = run_fwd_bwd(args, force_trace=True)
assert model.has_trace_for(*args)
if is_module:
model.load_state_dict(saved_state)
compiled_outs, compiled_grads = run_fwd_bwd(args, assert_compiled=True)
_verify_equal(uncompiled_outs, compiled_outs)
_verify_equal(uncompiled_grads, compiled_grads)
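# --- Illustrative usage sketch (not part of the original file) ---
# A minimal sketch following the docstring above; it assumes a module that was
# decorated with the legacy `@torch.jit.compile` API, which `verify` requires.
#
#     compiled_model = ...                     # @torch.jit.compile-decorated
#     example = (torch.randn(4, 3),)
#     # Runs the model in both compiled and uncompiled form, compares outputs
#     # and gradients, and raises if they diverge by more than 1e-6:
#     verify(compiled_model, example, loss_fn=torch.sum)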
def _verify_equal(xs, ys):
for x, y in zip(xs, ys):
if x.sub(y).abs().max() > 1e-6:
raise RuntimeError("JIT and real computation mismatch")
def indent(s):
return "\n".join(["\t" + line for line in s.splitlines()])
class TracingCheckError(Exception):
def __init__(self, graph_diff_error, tensor_compare_error, extra_msg=None):
self.message = "Tracing failed sanity checks!\n"
if extra_msg is not None:
self.message += extra_msg + "\n"
if graph_diff_error is not None:
self.message += "ERROR: Graphs differed across invocations!\n"
self.message += indent(graph_diff_error) + "\n"
if tensor_compare_error is not None:
self.message += (
"ERROR: Tensor-valued Constant nodes differed in value "
"across invocations. This often indicates that the tracer has"
" encountered untraceable code.\n"
)
self.message += indent(tensor_compare_error) + "\n"
super(TracingCheckError, self).__init__(self.message)
# Check the traced module against a set of user-provided validation inputs
@torch.no_grad()
def _check_trace(
check_inputs,
func,
traced_func,
check_tolerance,
strict,
force_outplace,
is_trace_module,
_module_class,
):
# Note: tracing is independent of optimizations, which consume the trace
for inputs in check_inputs:
if isinstance(inputs, torch.Tensor):
inputs = (inputs,)
if is_trace_module:
copied_dict = {}
for name, data in inputs.items():
copied_dict[name] = _clone_inputs(data)
check_mod = torch.jit.trace_module(
func.__self__ if hasattr(func, "__self__") else func,
copied_dict,
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
_compilation_unit=torch._C.CompilationUnit(),
)
check_mod_func = check_mod._c._get_method(traced_func.name)
inputs = inputs[traced_func.name]
if isinstance(inputs, (torch.Tensor, dict)):
inputs = (inputs,)
else:
check_mod = torch.jit.trace(
func,
_clone_inputs(inputs),
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
)
check_mod_func = check_mod
def graph_diagnostic_info():
mod_canonicalized = torch._C._jit_pass_canonicalize(traced_func.graph)
torch._C._jit_pass_inline(mod_canonicalized)
torch._C._jit_pass_erase_shape_information(mod_canonicalized)
mod_str = str(mod_canonicalized)
mod_str = re.sub(r"___torch_mangle_[0-9]+\.", "", mod_str)
check_canonicalized = torch._C._jit_pass_canonicalize(check_mod_func.graph)
torch._C._jit_pass_inline(check_canonicalized)
torch._C._jit_pass_erase_shape_information(check_canonicalized)
check_str = str(check_canonicalized)
check_str = re.sub(r"___torch_mangle_[0-9]+\.", "", check_str)
graph_diff_errors = None
if mod_str != check_str:
import difflib
graph_diff = difflib.ndiff(
mod_str.splitlines(True), check_str.splitlines(True)
)
graph_diff_errors = "Graph diff:\n" + indent("".join(graph_diff)) + "\n"
for n_mod, n_check in zip(
mod_canonicalized.nodes(), check_canonicalized.nodes()
):
if str(n_mod) != str(n_check):
graph_diff_errors += "First diverging operator:\n"
node_diff = difflib.ndiff(
str(n_mod).splitlines(True), str(n_check).splitlines(True)
)
source_printout = (
"Node diff:\n" + indent("".join(node_diff)) + "\n"
)
mod_stack = n_mod.sourceRange()
if mod_stack:
source_printout += (
"Trace source location:\n" + indent(mod_stack) + "\n"
)
check_stack = n_check.sourceRange()
if check_stack:
source_printout += (
"Check source location:\n" + indent(check_stack) + "\n"
)
graph_diff_errors += source_printout
break # For now, only print out the first pair of nodes that diverges
tensor_compare_errors = None
# Check Tensor-valued constant nodes
for n_mod, n_check in zip(
mod_canonicalized.nodes(), check_canonicalized.nodes()
):
if n_mod.kind() != n_check.kind():
break # Graphs have already diverged
if n_mod.kind() == "prim::Constant" and not (
n_mod.mustBeNone() or n_check.mustBeNone()
):
if not n_mod.hasAttribute("value"):
continue
if n_mod.kindOf("value") != "t" or n_check.kindOf("value") != "t":
continue
mod_tensor_val = n_mod.t("value")
check_tensor_val = n_check.t("value")
try:
torch.testing.assert_close(mod_tensor_val, check_tensor_val, equal_nan=True)
except (RuntimeError, AssertionError) as e:
if tensor_compare_errors is None:
tensor_compare_errors = ""
tensor_compare_errors += "Node:\n" + indent(str(n_mod)) + "\n"
compare_stack = n_mod.sourceRange()
if compare_stack:
tensor_compare_errors += (
"Source Location:\n" + indent(compare_stack) + "\n"
)
tensor_compare_errors += "Comparison exception: " + indent(
str(e)
)
break # For now, only print the first diverging pair
return graph_diff_errors, tensor_compare_errors
def wrap_retval(x):
return x if isinstance(x, tuple) else (x,)
def run_mod_and_filter_tensor_outputs(mod, inputs, running_what):
try:
outs = wrap_retval(mod(*_clone_inputs(inputs)))
outs = [out for out in outs if isinstance(out, torch.Tensor)]
return outs
except Exception as e:
graph_diff_errors, tensor_compare_errors = graph_diagnostic_info()
msg = f"encountered an exception while running the {running_what} with test inputs.\nException:\n{indent(str(e))}"
raise TracingCheckError(
graph_diff_errors,
tensor_compare_errors,
extra_msg=msg,
) from e
has_warned = [False]
def maybe_warn_nondeterministic():
if has_warned[0]:
return
has_warned[0] = True
nondeterm_ops = [
op for op in traced_func.graph.nodes() if op.isNondeterministic()
]
if len(nondeterm_ops) > 0:
nondeterministic_ops_warning = "Trace had nondeterministic nodes. "
                nondeterministic_ops_warning += (
                    "Did you forget to call .eval() on your model? Nodes:\n"
                )
nondeterministic_ops_warning += "\n".join(
[indent(str(op)) for op in nondeterm_ops][:20]
)
nondeterministic_ops_warning += (
"\nThis may cause errors in trace checking. To disable trace checking,"
" pass check_trace=False to torch.jit.trace()"
)
warnings.warn(
nondeterministic_ops_warning, category=TracerWarning, stacklevel=5
)
def compare_outputs(original, reference, match_what):
all_ok = True
for i, (orig, ref) in enumerate(zip(original, reference)):
try:
if orig.is_quantized:
orig = orig.dequantize()
if ref.is_quantized:
ref = ref.dequantize()
if orig.is_mkldnn:
orig = orig.to_dense()
if ref.is_mkldnn:
ref = ref.to_dense()
if ref.is_complex() or orig.is_complex():
torch.testing.assert_close(
orig.to(torch.cdouble),
ref.to(torch.cdouble),
rtol=check_tolerance,
atol=default_tolerances(orig, ref)[1],
equal_nan=True,
)
else:
torch.testing.assert_close(
orig.double(),
ref.double(),
rtol=check_tolerance,
atol=default_tolerances(orig, ref)[1],
equal_nan=True,
)
except AssertionError as e:
maybe_warn_nondeterministic()
warnings.warn(
"Output nr "
+ str(i + 1)
+ ". of the traced function does not match "
"the corresponding output of the "
+ match_what
+ ". Detailed error:\n"
+ str(e),
category=TracerWarning,
stacklevel=4,
)
all_ok = False
return all_ok
traced_outs = run_mod_and_filter_tensor_outputs(traced_func, inputs, "trace")
fn_outs = run_mod_and_filter_tensor_outputs(func, inputs, "Python function")
if compare_outputs(traced_outs, fn_outs, "Python function"):
check_outs = run_mod_and_filter_tensor_outputs(
check_mod_func, inputs, "repeated trace"
)
compare_outputs(traced_outs, check_outs, "repeated trace")
diag_info = graph_diagnostic_info()
if any(info is not None for info in diag_info):
raise TracingCheckError(*diag_info)
class TracerWarning(Warning):
@staticmethod
def ignore_lib_warnings():
# We ignore warnings from all submodules excluding the JIT, because we need them e.g. for _check_trace
warnings.filterwarnings(
"ignore", category=TracerWarning, module="torch.(?!jit)"
)
# We ignore the tracer warnings coming from inside the library, because all our shape
# checks in nn will trigger them.
TracerWarning.ignore_lib_warnings()
torch._C._tracer_warn_use_python()
def make_tuple(example_inputs):
if isinstance(example_inputs, (torch.Tensor, dict)):
return (example_inputs,)
# done primarily so that weird iterables fail here and not pybind11 code
if not isinstance(example_inputs, tuple):
return tuple(example_inputs)
return example_inputs
def make_module(mod, _module_class, _compilation_unit):
if isinstance(mod, ScriptModule):
return mod
elif torch._jit_internal.module_has_exports(mod):
infer_methods_stubs_fn = torch.jit._recursive.make_stubs_from_exported_methods
return torch.jit._recursive.create_script_module(
mod,
infer_methods_stubs_fn,
share_types=False,
is_tracing=True
)
else:
if _module_class is None:
_module_class = TopLevelTracedModule
return _module_class(mod, _compilation_unit=_compilation_unit)
def wrap_check_inputs(check_inputs):
if check_inputs is None:
return None
return [{"forward": c} for c in check_inputs]
def trace(
func,
example_inputs,
optimize=None,
check_trace=True,
check_inputs=None,
check_tolerance=1e-5,
strict=True,
_force_outplace=False,
_module_class=None,
_compilation_unit=_python_cu,
):
"""
Trace a function and return an executable or :class:`ScriptFunction`
that will be optimized using just-in-time compilation. Tracing is ideal for
code that operates only on ``Tensor``\\s and lists, dictionaries, and
tuples of ``Tensor``\\s.
Using `torch.jit.trace` and `torch.jit.trace_module`, you can turn an
existing module or Python function into a TorchScript
:class:`ScriptFunction` or :class:`ScriptModule`. You must provide example
inputs, and we run the function, recording the operations performed on all
the tensors.
* The resulting recording of a standalone function produces `ScriptFunction`.
* The resulting recording of `nn.Module.forward` or `nn.Module` produces
`ScriptModule`.
This module also contains any parameters that the original
module had as well.
Warning:
Tracing only correctly records functions and modules which are not data
dependent (e.g., do not have conditionals on data in tensors) and do not have
any untracked external dependencies (e.g., perform input/output or
access global variables). Tracing only records operations done when the given
function is run on the given tensors. Therefore, the returned
`ScriptModule` will always run the same traced graph on any input. This
has some important implications when your module is expected to run
different sets of operations, depending on the input and/or the module
state. For example,
* Tracing will not record any control-flow like if-statements or loops.
When this control-flow is constant across your module, this is fine
and it often inlines the control-flow decisions. But sometimes the
control-flow is actually part of the model itself. For instance, a
recurrent network is a loop over the (possibly dynamic) length of an
input sequence.
* In the returned :class:`ScriptModule`, operations that have different
behaviors in ``training`` and ``eval`` modes will always behave as if
it is in the mode it was in during tracing, no matter which mode the
`ScriptModule` is in.
In cases like these, tracing would not be appropriate and
:func:`scripting <torch.jit.script>` is a better choice. If you trace
such models, you may silently get incorrect results on subsequent
invocations of the model. The tracer will try to emit warnings when
doing something that may cause an incorrect trace to be produced.
Args:
func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
that will be run with `example_inputs`. `func` arguments and return
values must be tensors or (possibly nested) tuples that contain
tensors. When a module is passed `torch.jit.trace`, only the
``forward`` method is run and traced (see :func:`torch.jit.trace
<torch.jit.trace_module>` for details).
example_inputs (tuple or torch.Tensor): A tuple of example inputs that
will be passed to the function while tracing. The resulting trace
can be run with inputs of different types and shapes assuming the
traced operations support those types and shapes. `example_inputs`
may also be a single Tensor in which case it is automatically
wrapped in a tuple.
Keyword arguments:
check_trace (``bool``, optional): Check if the same inputs run through
traced code produce the same outputs. Default: ``True``. You might want
to disable this if, for example, your network contains non-
deterministic ops or if you are sure that the network is correct despite
a checker failure.
check_inputs (list of tuples, optional): A list of tuples of input
arguments that should be used to check the trace against what is
expected. Each tuple is equivalent to a set of input arguments that
would be specified in ``example_inputs``. For best results, pass in
a set of checking inputs representative of the space of shapes and
types of inputs you expect the network to see. If not specified,
the original ``example_inputs`` are used for checking
check_tolerance (float, optional): Floating-point comparison tolerance
to use in the checker procedure. This can be used to relax the
checker strictness in the event that results diverge numerically
for a known reason, such as operator fusion.
strict (``bool``, optional): run the tracer in a strict mode or not
(default: ``True``). Only turn this off when you want the tracer to
record your mutable container types (currently ``list``/``dict``)
and you are sure that the container you are using in your
problem is a ``constant`` structure and does not get used as
control flow (if, for) conditions.
Returns:
If `func` is `nn.Module` or ``forward`` of `nn.Module`, `trace` returns
a :class:`ScriptModule` object with a single ``forward`` method
containing the traced code. The returned `ScriptModule` will
have the same set of sub-modules and parameters as the original
``nn.Module``. If ``func`` is a standalone function, ``trace``
returns `ScriptFunction`.
Example (tracing a function):
.. testcode::
import torch
def foo(x, y):
return 2 * x + y
# Run `foo` with the provided inputs and record the tensor operations
traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))
# `traced_foo` can now be run with the TorchScript interpreter or saved
# and loaded in a Python-free environment
Example (tracing an existing module)::
import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, x):
return self.conv(x)
n = Net()
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
# Trace a specific method and construct `ScriptModule` with
# a single `forward` method
module = torch.jit.trace(n.forward, example_forward_input)
# Trace a module (implicitly traces `forward`) and construct a
# `ScriptModule` with a single `forward` method
module = torch.jit.trace(n, example_forward_input)
"""
if not _enabled:
return func
if optimize is not None:
        warnings.warn(
            "`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution()` instead"
        )
if isinstance(func, torch.jit.ScriptModule):
# it is hard to trace it because the forward method on ScriptModule is already defined, so it
# would result in an error.
warnings.warn(
"The input to trace is already a ScriptModule, tracing it is a no-op. Returning the object as is."
)
return func
if isinstance(func, torch.nn.Module):
return trace_module(
func,
{"forward": example_inputs},
None,
check_trace,
wrap_check_inputs(check_inputs),
check_tolerance,
strict,
_force_outplace,
_module_class,
)
if (
hasattr(func, "__self__")
and isinstance(func.__self__, torch.nn.Module)
and func.__name__ == "forward"
):
return trace_module(
func.__self__,
{"forward": example_inputs},
None,
check_trace,
wrap_check_inputs(check_inputs),
check_tolerance,
strict,
_force_outplace,
_module_class,
)
    # Special-case the common case of passing a single Tensor
if isinstance(example_inputs, (torch.Tensor, dict)):
example_inputs = (example_inputs,)
# done primarily so that weird iterables fail here and not pybind11 code
elif not isinstance(example_inputs, tuple):
example_inputs = tuple(example_inputs)
var_lookup_fn = _create_interpreter_name_lookup_fn(0)
if hasattr(func, "__self__") and isinstance(func.__self__, torch.nn.Module):
raise AttributeError(
"trace doesn't support compiling individual module's functions.\n"
"Please use trace_module"
)
name = _qualified_name(func)
traced = torch._C._create_function_from_trace(
name,
func,
example_inputs,
var_lookup_fn,
strict,
_force_outplace,
get_callable_argument_names(func)
)
# Check the trace against new traces created from user-specified inputs
if check_trace:
if check_inputs is not None:
_check_trace(
check_inputs,
func,
traced,
check_tolerance,
strict,
_force_outplace,
False,
_module_class,
)
else:
_check_trace(
[example_inputs],
func,
traced,
check_tolerance,
strict,
_force_outplace,
False,
_module_class,
)
return traced
_trace_module_map: Optional[Dict[Any, Any]] = None
def trace_module(
mod,
inputs,
optimize=None,
check_trace=True,
check_inputs=None,
check_tolerance=1e-5,
strict=True,
_force_outplace=False,
_module_class=None,
_compilation_unit=_python_cu,
):
"""
Trace a module and return an executable :class:`ScriptModule` that will be optimized
using just-in-time compilation. When a module is passed to :func:`torch.jit.trace <torch.jit.trace>`, only
the ``forward`` method is run and traced. With ``trace_module``, you can specify a dictionary of
    method names to example inputs to trace (see the ``inputs`` argument below).
See :func:`torch.jit.trace <torch.jit.trace>` for more information on tracing.
Args:
mod (torch.nn.Module): A ``torch.nn.Module`` containing methods whose names are
specified in ``inputs``. The given methods will be compiled
as a part of a single `ScriptModule`.
inputs (dict): A dict containing sample inputs indexed by method names in ``mod``.
The inputs will be passed to methods whose names correspond to inputs'
keys while tracing.
``{ 'forward' : example_forward_input, 'method2': example_method2_input}``
Keyword arguments:
check_trace (``bool``, optional): Check if the same inputs run through
traced code produce the same outputs. Default: ``True``. You might want
to disable this if, for example, your network contains non-
deterministic ops or if you are sure that the network is correct despite
a checker failure.
check_inputs (list of dicts, optional): A list of dicts of input arguments that should be used
to check the trace against what is expected. Each tuple
is equivalent to a set of input arguments that would
be specified in ``inputs``. For best results, pass in a
set of checking inputs representative of the space of
shapes and types of inputs you expect the network to see.
If not specified, the original ``inputs`` are used for checking
check_tolerance (float, optional): Floating-point comparison tolerance to use in the checker procedure.
This can be used to relax the checker strictness in the event that
results diverge numerically for a known reason, such as operator fusion.
Returns:
A :class:`ScriptModule` object with a single ``forward`` method containing the traced code.
When ``func`` is a ``torch.nn.Module``, the returned :class:`ScriptModule` will have the same set of
sub-modules and parameters as ``func``.
Example (tracing a module with multiple methods)::
import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, x):
return self.conv(x)
def weighted_kernel_sum(self, weight):
return weight * self.conv.weight
n = Net()
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
# Trace a specific method and construct `ScriptModule` with
# a single `forward` method
module = torch.jit.trace(n.forward, example_forward_input)
# Trace a module (implicitly traces `forward`) and construct a
# `ScriptModule` with a single `forward` method
module = torch.jit.trace(n, example_forward_input)
# Trace specific methods on a module (specified in `inputs`), constructs
# a `ScriptModule` with `forward` and `weighted_kernel_sum` methods
inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
module = torch.jit.trace_module(n, inputs)
"""
if not _enabled:
return mod
if optimize is not None:
        warnings.warn(
            "`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution()` instead"
        )
var_lookup_fn = _create_interpreter_name_lookup_fn(0)
if not isinstance(mod, torch.nn.Module):
raise AttributeError("expected torch.nn.Module as the first argument")
if not isinstance(inputs, dict):
raise AttributeError("expected a dictionary of (method_name, input) pairs")
old_module_map = torch.jit._trace._trace_module_map
try:
trace_module_map: Dict[Any, Any] = {}
def register_submods(mod, prefix):
for name, child in mod.named_children():
submod_qualname = prefix + "." + name
trace_module_map[child] = submod_qualname
register_submods(child, submod_qualname)
trace_module_map["__module"] = mod
torch.jit._trace._trace_module_map = trace_module_map
register_submods(mod, "__module")
module = make_module(mod, _module_class, _compilation_unit)
for method_name, example_inputs in inputs.items():
if method_name == "forward":
# "forward" is a special case because we need to trace
# `Module.__call__`, which sets up some extra tracing, but uses
# argument names of the real `Module.forward` method.
func = mod
forward_method = getattr(mod, method_name)
argument_names = get_callable_argument_names(forward_method)
else:
func = getattr(mod, method_name)
argument_names = get_callable_argument_names(func)
example_inputs = make_tuple(example_inputs)
module._c._create_method_from_trace(
method_name,
func,
example_inputs,
var_lookup_fn,
strict,
_force_outplace,
argument_names,
)
check_trace_method = module._c._get_method(method_name)
# Check the trace against new traces created from user-specified inputs
if check_trace:
if check_inputs is not None:
_check_trace(
check_inputs,
func,
check_trace_method,
check_tolerance,
strict,
_force_outplace,
True,
_module_class,
)
else:
_check_trace(
[inputs],
func,
check_trace_method,
check_tolerance,
strict,
_force_outplace,
True,
_module_class,
)
finally:
torch.jit._trace._trace_module_map = old_module_map
return module
def is_tracing():
"""
Returns ``True`` in tracing (if a function is called during the tracing of
code with ``torch.jit.trace``) and ``False`` otherwise.
"""
if is_scripting():
return False
return torch._C._is_tracing()
class TracedModule(ScriptModule):
_disable_script_meta = True
def __init__(self, orig, id_set=None, _compilation_unit=None):
# XXX: orig can be a nn.Module or a function!
super(TracedModule, self).__init__()
assert isinstance(orig, torch.nn.Module)
# Copy a subset of `orig` to a temporary nn.Module.
# This is a way to customize what will actually get compiled by create_script_module
id_set = set()
# This allows us to preserve the original module's qualified name by defining a new
# type with the attribute _jit_override_qualname. In torch._jit_internal._qualified_name
# we have a special case that will look up this attribute to override whatever qualname
# we would get from the python type system
class QualnameWrapper(torch.nn.Module):
pass
QualnameWrapper._jit_override_qualname = torch._jit_internal._qualified_name( # type: ignore[attr-defined]
type(orig)
)
tmp_module = QualnameWrapper()
def check_unique(param):
if param in id_set:
raise ValueError(
"TracedModules don't support parameter sharing between modules"
)
id_set.add(param)
tmp_module.training = orig.training
for name, param in orig._parameters.items():
if param is not None:
tmp_module._parameters[name] = param
check_unique(param)
for name, buf in orig._buffers.items():
if buf is not None:
tmp_module._buffers[name] = buf
check_unique(buf)
for name, val in orig.__dict__.items():
if (
torch._C._jit_is_script_object(val)
and name not in orig._parameters
and name not in orig._buffers
):
setattr(tmp_module, name, val)
if orig._backward_hooks:
raise ValueError(
"Modules that have backward hooks assigned can't be compiled: "
+ str(orig)
)
for name, submodule in orig._modules.items():
if submodule is None:
continue
tmp_module._modules[name] = make_module(
submodule, TracedModule, _compilation_unit=None
)
script_module = torch.jit._recursive.create_script_module(
tmp_module, lambda module: (), share_types=False, is_tracing=True
)
self.__dict__["_name"] = type(orig).__name__
self.__dict__["_actual_script_module"] = script_module
for name in ("_parameters", "_buffers", "_modules", "training"):
delattr(self, name)
def forward(self, *args, **kwargs):
        raise RuntimeError("Traced submodules cannot be called.")
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super(TracedModule, self).__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
return super(TracedModule, self).__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def _get_name(self):
return self._name
def extra_repr(self):
return "original_name={}".format(self._name)
class TopLevelTracedModule(TracedModule):
forward = _CachedForward()
def _reconstruct(self, cpp_module):
"""
Re-construct an instance of TopLevelTracedModule using an instance of a C++ module.
Args:
cpp_module: The C++ module that this TopLevelTracedModule will be rebuilt around.
"""
self.__dict__["_actual_script_module"]._reconstruct(cpp_module)
def _script_if_tracing(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
if not is_tracing():
# Not tracing, don't do anything
return fn(*args, **kwargs)
compiled_fn = script(wrapper.__original_fn) # type: ignore[attr-defined]
return compiled_fn(*args, **kwargs)
wrapper.__original_fn = fn # type: ignore[attr-defined]
wrapper.__script_if_tracing_wrapper = True # type: ignore[attr-defined]
return wrapper
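# --- Illustrative sketch (not part of the original file) ---
# A minimal sketch of how the wrapper above behaves; `helper` is a
# hypothetical function.
#
#     @_script_if_tracing
#     def helper(x):
#         if x.sum() > 0:
#             return x + 1
#         return x - 1
#
#     helper(torch.ones(2))   # eager call: the original Python function runs
#
#     # During torch.jit.trace, `helper` is scripted on first use, so its
#     # data-dependent branch is preserved instead of being frozen into the
#     # trace for one particular input.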
def _get_trace_graph(f, args=(), kwargs=None, strict=True, _force_outplace=False,
return_inputs=False, _return_inputs_states=False):
"""
.. warning::
This function is internal-only and should only be used by the ONNX
exporter. If you are trying to get a graph through tracing, please go
through the public API instead::
trace = torch.jit.trace(nn.LSTMCell(), (input, hidden))
trace_graph = trace.graph
    Trace a function or model, returning a tuple consisting of both the
    *trace* of an execution, as well as the original return value. If return_inputs,
    also returns the trace inputs as part of the tuple.
Tracing is guaranteed not to change the semantics of the function/module
that is traced.
Args:
f (torch.nn.Module or function): the function or module
to be traced.
args (tuple or Tensor): the positional arguments to pass to the
function/module to be traced. A non-tuple is assumed to
be a single positional argument to be passed to the model.
kwargs (dict): the keyword arguments to pass to the function/module
to be traced.
Example (trace a cell):
.. testcode::
trace = torch.jit.trace(nn.LSTMCell(), (input, hidden))
"""
if kwargs is None:
kwargs = {}
if not isinstance(args, tuple):
args = (args,)
outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
return outs
| pytorch-master | torch/jit/_trace.py |
from torch._C import _compile_graph_to_code_table, _generate_upgraders_graph
from typing import List
def format_bytecode(table):
    # given a nested tuple, convert it to a nested list
def listify(content):
if not isinstance(content, tuple):
return content
return [listify(i) for i in content]
formatted_table = {}
for entry in table:
identifier = entry[0]
content = entry[1]
content = listify(content)
formatted_table[identifier] = content
return formatted_table
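# --- Illustrative sketch (not part of the original file) ---
# A minimal sketch of format_bytecode's shape conversion, using a tiny
# hand-written table; the identifiers and contents below are made up.
#
#     table = (
#         ("instructions", (("OP", 0, 1),)),
#         ("constants", (1, 2)),
#     )
#     format_bytecode(table)
#     # -> {"instructions": [["OP", 0, 1]], "constants": [1, 2]}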
def generate_upgraders_bytecode() -> List:
yaml_content = []
upgraders_graph_map = _generate_upgraders_graph()
for upgrader_name, upgrader_graph in upgraders_graph_map.items():
bytecode_table = _compile_graph_to_code_table(upgrader_name, upgrader_graph)
entry = {upgrader_name: format_bytecode(bytecode_table)}
yaml_content.append(entry)
return yaml_content
if __name__ == "__main__":
raise RuntimeError("This file is not meant to be run directly")
| pytorch-master | torch/jit/generate_bytecode.py |
"""JIT-related state
This module stores various pieces of Python-global state relating to the JIT.
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import torch
import os
import weakref
class EnabledProxy:
"""Stores whether the JIT is enabled or not.
This is just a wrapper for a bool, so that we get reference semantics
"""
def __init__(self):
self.enabled = self.parse_env(
"PYTORCH_JIT", True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED"
)
def parse_env(self, name, default, true_message, false_message):
value = os.environ.get(name)
if value is None:
return default
if value.lower() in {"1", "true", "yes"}:
return True
elif value.lower() in {"0", "false", "no"}:
return False
if value == "1v":
print(true_message)
return True
elif value == "0v":
print(false_message)
return False
raise ValueError("Unknown setting of {}. Try using 0 or 1.".format(name))
def __bool__(self):
return self.enabled
_enabled = EnabledProxy()
def disable():
_enabled.enabled = False
def enable():
_enabled.enabled = True
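# --- Illustrative sketch (not part of the original file) ---
# A minimal sketch of how the flag above is consumed. PYTORCH_JIT is read once
# at import time by EnabledProxy; with the JIT disabled, scripting becomes a
# pass-through and decorated functions stay plain Python.
#
#     # $ PYTORCH_JIT=0 python my_script.py
#     import torch
#
#     @torch.jit.script
#     def f(x):
#         return x + 1
#
#     # `f` is the original Python function here, not a ScriptFunction.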
# The Python CompilationUnit. All functions and modules defined in Python will
# live in here. It's defined in Python because doing it in cpp creates static
# destruction order issues.
_python_cu = torch._C.CompilationUnit()
# python class => ScriptClass mapping
_script_classes = {}
_name_to_pyclass = {}
def _add_script_class(python_class, script_class):
_script_classes[python_class] = script_class
_name_to_pyclass[script_class.qualified_name()] = python_class
def _get_script_class(python_class):
override = getattr(python_class, "_jit_override_qualname", None)
if override is not None:
python_class = _get_python_class(override)
return _script_classes.get(python_class, None)
def _get_python_class(qualified_name):
return _name_to_pyclass.get(qualified_name, None)
def _clear_class_state():
_script_classes.clear()
_name_to_pyclass.clear()
# Caching: we currently cache compilation of free functions and overloaded functions.
# To cache free functions we hold a weak ref to the function object and
# map to the compiled fn's qualified name.
# To cache overloaded functions we hold a weak ref to the function obj and
# map to all of its overloaded compiled fns.
# In the future we could consider caching more types of objects so that
# aliasing is preserved across separate compilations of the same object.
_jit_caching_layer: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
_jit_function_overload_caching: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
def _try_get_jit_cached_overloads(key):
qual_names = _jit_function_overload_caching.get(key, None)
if qual_names:
return [_python_cu.find_function(qual_name) for qual_name in qual_names]
else:
return None
def _set_jit_overload_cache(key, compiled_fns):
_jit_function_overload_caching[key] = [fn.qualified_name for fn in compiled_fns]
def _try_get_jit_cached_function(key):
if getattr(key, "__disable_jit_function_caching__", False) is True:
return None
qual_name = _jit_caching_layer.get(key, None)
if qual_name:
return _python_cu.find_function(qual_name)
else:
return None
def _set_jit_function_cache(key, value):
# only free functions currently supported
assert isinstance(value, torch.jit.ScriptFunction)
_jit_caching_layer[key] = value.qualified_name
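# --- Illustrative sketch (not part of the original file) ---
# A rough sketch of the free-function caching flow above (`f` is a
# hypothetical function):
#
#     def f(x):
#         return x * 2
#
#     scripted = torch.jit.script(f)
#     # Scripting stores the compiled function's qualified name keyed by the
#     # original Python object, roughly:
#     #     _set_jit_function_cache(f, scripted)
#     # A later torch.jit.script(f) call first consults
#     #     _try_get_jit_cached_function(f)
#     # and reuses the same ScriptFunction instead of recompiling.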
| pytorch-master | torch/jit/_state.py |
"""Async API
This module contains the API for parallelism in TorchScript, notably:
* torch.jit.fork
* torch.jit.wait
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import torch
from torch.utils import set_module
from torch.jit._builtins import _register_builtin
from torch._jit_internal import Future
set_module(Future, "torch.jit")
def fork(func, *args, **kwargs):
r"""
Creates an asynchronous task executing `func` and a reference to the value
of the result of this execution. `fork` will return immediately,
so the return value of `func` may not have been computed yet. To force completion
of the task and access the return value invoke `torch.jit.wait` on the Future. `fork` invoked
with a `func` which returns `T` is typed as `torch.jit.Future[T]`. `fork` calls can be arbitrarily
nested, and may be invoked with positional and keyword arguments.
Asynchronous execution will only occur when run in TorchScript. If run in pure python,
`fork` will not execute in parallel. `fork` will also not execute in parallel when invoked
    while tracing; however, the `fork` and `wait` calls will be captured in the exported IR Graph.
.. warning::
`fork` tasks will execute non-deterministically. We recommend only spawning
parallel fork tasks for pure functions that do not modify their inputs,
module attributes, or global state.
Args:
func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
that will be invoked. If executed in TorchScript, it will execute asynchronously,
otherwise it will not. Traced invocations of fork will be captured in the IR.
``*args``, ``**kwargs``: arguments to invoke `func` with.
Returns:
`torch.jit.Future[T]`: a reference to the execution of `func`. The value `T`
can only be accessed by forcing completion of `func` through `torch.jit.wait`.
Example (fork a free function):
.. code-block:: python
import torch
from torch import Tensor
def foo(a : Tensor, b : int) -> Tensor:
return a + b
def bar(a):
fut : torch.jit.Future[Tensor] = torch.jit.fork(foo, a, b=2)
return torch.jit.wait(fut)
script_bar = torch.jit.script(bar)
input = torch.tensor(2)
# only the scripted version executes asynchronously
assert script_bar(input) == bar(input)
# trace is not run asynchronously, but fork is captured in IR
graph = torch.jit.trace(bar, (input,)).graph
assert "fork" in str(graph)
Example (fork a module method):
.. code-block:: python
import torch
from torch import Tensor
class AddMod(torch.nn.Module):
def forward(self, a: Tensor, b : int):
return a + b
class Mod(torch.nn.Module):
def __init__(self):
                super().__init__()
self.mod = AddMod()
def forward(self, input):
                fut = torch.jit.fork(self.mod, input, b=2)
return torch.jit.wait(fut)
input = torch.tensor(2)
mod = Mod()
assert mod(input) == torch.jit.script(mod).forward(input)
"""
return torch._C.fork(func, *args, **kwargs)
def wait(future):
r"""
Forces completion of a `torch.jit.Future[T]` asynchronous task, returning the
result of the task. See :func:`~fork` for docs and examples.
Args:
        future (torch.jit.Future[T]): an asynchronous task reference, created through `torch.jit.fork`
Returns:
        `T`: the return value of the completed task
"""
return torch._C.wait(future)
_register_builtin(wait, "aten::wait")
| pytorch-master | torch/jit/_async.py |
import ast
import enum
import inspect
import re
import builtins
import torch
import warnings
from .._jit_internal import List, Tuple, is_tuple, is_list, Dict, is_dict, Optional, \
is_optional, _qualified_name, Any, Future, is_future, is_ignored_fn, Union, is_union
from .._jit_internal import BroadcastingList1, BroadcastingList2, BroadcastingList3 # type: ignore[attr-defined]
from ._state import _get_script_class
from torch._C import TensorType, TupleType, FloatType, IntType, ComplexType, \
ListType, StringType, DictType, BoolType, OptionalType, InterfaceType, AnyType, \
NoneType, DeviceObjType, StreamObjType, FutureType, EnumType, UnionType, NumberType
from textwrap import dedent
from torch._sources import get_source_lines_and_file
from typing import Type
if torch.distributed.rpc.is_available():
from .._jit_internal import RRef, is_rref
from torch._C import RRefType
from torch._ops import OpOverloadPacket
class Module(object):
def __init__(self, name, members):
self.name = name
self.members = members
def __getattr__(self, name):
try:
return self.members[name]
except KeyError:
raise RuntimeError(f"Module {self.name} has no member called {name}") from None
class EvalEnv(object):
env = {
'torch': Module('torch', {'Tensor': torch.Tensor}),
'Tensor': torch.Tensor,
'typing': Module('typing', {'Tuple': Tuple}),
'Tuple': Tuple,
'List': List,
'Dict': Dict,
'Optional': Optional,
'Union': Union,
'Future': Future
}
def __init__(self, rcb):
self.rcb = rcb
if torch.distributed.rpc.is_available():
self.env['RRef'] = RRef
def __getitem__(self, name):
if name in self.env:
return self.env[name]
if self.rcb is not None:
return self.rcb(name)
return getattr(builtins, name, None)
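# --- Illustrative sketch (not part of the original file) ---
# A minimal sketch of the lookup order implemented above: the hard-coded env
# dict wins, then the resolution callback (rcb), then Python builtins.
#
#     env = EvalEnv(rcb=None)
#     env['Tensor']    # -> torch.Tensor            (from the env dict)
#     env['Optional']  # -> typing Optional wrapper (from the env dict)
#     env['len']       # -> builtins.len            (builtins fallback)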
def get_signature(fn, rcb, loc, is_method):
if isinstance(fn, OpOverloadPacket):
signature = try_real_annotations(fn.op, loc)
else:
signature = try_real_annotations(fn, loc)
if signature is not None and is_method:
# If this is a method, then the signature will include a type for
# `self`, but type comments do not contain a `self`. So strip it
# away here so everything is consistent (`inspect.ismethod` does
# not work here since `fn` is unbound at this point)
param_types, return_type = signature
param_types = param_types[1:]
signature = (param_types, return_type)
if signature is None:
type_line, source = None, None
try:
source = dedent(''.join(get_source_lines_and_file(fn)[0]))
type_line = get_type_line(source)
except TypeError:
pass
    # This might happen either because we failed to get the source of fn, or
    # because it didn't have any annotations.
if type_line is not None:
signature = parse_type_line(type_line, rcb, loc)
return signature
def is_function_or_method(the_callable):
# A stricter version of `inspect.isroutine` that does not pass for built-in
# functions
return inspect.isfunction(the_callable) or inspect.ismethod(the_callable)
def is_vararg(the_callable):
if not is_function_or_method(the_callable) and hasattr(the_callable, '__call__'): # noqa: B004
# If `the_callable` is a class, de-sugar the call so we can still get
# the signature
the_callable = the_callable.__call__
if is_function_or_method(the_callable):
return inspect.getfullargspec(the_callable).varargs is not None
else:
return False
def get_param_names(fn, n_args):
if isinstance(fn, OpOverloadPacket):
fn = fn.op
if not is_function_or_method(fn) and hasattr(fn, '__call__') and is_function_or_method(fn.__call__): # noqa: B004
# De-sugar calls to classes
fn = fn.__call__
if is_function_or_method(fn):
if is_ignored_fn(fn):
fn = inspect.unwrap(fn)
return inspect.getfullargspec(fn).args
else:
# The `fn` was not a method or function (maybe a class with a __call__
# method, so use a default param name list)
return [str(i) for i in range(n_args)]
def check_fn(fn, loc):
# Make sure the function definition is not a class instantiation
try:
source = dedent(''.join(get_source_lines_and_file(fn)[0]))
except (TypeError, IOError):
return
if source is None:
return
py_ast = ast.parse(source)
if len(py_ast.body) == 1 and isinstance(py_ast.body[0], ast.ClassDef):
raise torch.jit.frontend.FrontendError(
loc, f"Cannot instantiate class '{py_ast.body[0].name}' in a script function")
if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
raise torch.jit.frontend.FrontendError(loc, "Expected a single top-level function")
def parse_type_line(type_line, rcb, loc):
"""Parses a type annotation specified as a comment.
Example inputs:
# type: (Tensor, torch.Tensor) -> Tuple[Tensor]
# type: (Tensor, Tuple[Tensor, Tensor]) -> Tensor
"""
arg_ann_str, ret_ann_str = split_type_line(type_line)
try:
arg_ann = eval(arg_ann_str, {}, EvalEnv(rcb)) # type: ignore[arg-type] # noqa: P204
except (NameError, SyntaxError) as e:
raise RuntimeError("Failed to parse the argument list of a type annotation") from e
if not isinstance(arg_ann, tuple):
arg_ann = (arg_ann,)
try:
ret_ann = eval(ret_ann_str, {}, EvalEnv(rcb)) # type: ignore[arg-type] # noqa: P204
except (NameError, SyntaxError) as e:
raise RuntimeError("Failed to parse the return type of a type annotation") from e
arg_types = [ann_to_type(ann, loc) for ann in arg_ann]
return arg_types, ann_to_type(ret_ann, loc)
def get_type_line(source):
"""Tries to find the line containing a comment with the type annotation."""
type_comment = '# type:'
lines = source.split('\n')
lines = [(line_num, line) for line_num, line in enumerate(lines)]
type_lines = list(filter(lambda line: type_comment in line[1], lines))
# `type: ignore` comments may be needed in JIT'ed functions for mypy, due
# to the hack in torch/_VF.py.
# An ignore type comment can be of following format:
# 1) type: ignore
# 2) type: ignore[rule-code]
# This ignore statement must be at the end of the line
# adding an extra backslash before the space, to avoid triggering
# one of the checks in .github/workflows/lint.yml
type_pattern = re.compile("# type:\\ ignore(\\[[a-zA-Z-]+\\])?$")
type_lines = list(filter(lambda line: not type_pattern.search(line[1]),
type_lines))
if len(type_lines) == 0:
# Catch common typo patterns like extra spaces, typo in 'ignore', etc.
wrong_type_pattern = re.compile("#[\t ]*type[\t ]*(?!: ignore(\\[.*\\])?$):")
wrong_type_lines = list(filter(lambda line: wrong_type_pattern.search(line[1]), lines))
if len(wrong_type_lines) > 0:
raise RuntimeError("The annotation prefix in line " + str(wrong_type_lines[0][0])
+ " is probably invalid.\nIt must be '# type:'"
+ "\nSee PEP 484 (https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code)" # noqa: B950
+ "\nfor examples")
return None
elif len(type_lines) == 1:
# Only 1 type line, quit now
return type_lines[0][1].strip()
# Parse split up argument types according to PEP 484
# https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code
return_line = None
parameter_type_lines = []
for line_num, line in type_lines:
if '# type: (...) -> ' in line:
return_line = (line_num, line)
break
elif type_comment in line:
parameter_type_lines.append(line)
if return_line is None:
raise RuntimeError(
"Return type line '# type: (...) -> ...' not found on multiline "
"type annotation\nfor type lines:\n" +
'\n'.join([line[1] for line in type_lines]) +
"\n(See PEP 484 https://www.python.org/dev/peps/pep-0484/#suggested-syntax-for-python-2-7-and-straddling-code)")
def get_parameter_type(line):
item_type = line[line.find(type_comment) + len(type_comment):]
return item_type.strip()
types = map(get_parameter_type, parameter_type_lines)
parameter_types = ", ".join(types)
return return_line[1].replace("...", parameter_types)
def split_type_line(type_line):
"""Splits the comment with the type annotation into parts for argument and return types.
For example, for an input of:
# type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]
This function will return:
("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]")
"""
start_offset = len('# type:')
try:
arrow_pos = type_line.index('->')
except ValueError:
        raise RuntimeError("Syntax error in type annotation (couldn't find `->`)") from None
return type_line[start_offset:arrow_pos].strip(), type_line[arrow_pos + 2:].strip()
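# --- Illustrative sketch (not part of the original file) ---
# A minimal sketch tying the helpers above together, using the type-comment
# form shown in their docstrings:
#
#     line = "# type: (Tensor, torch.Tensor) -> Tuple[Tensor, Tensor]"
#     split_type_line(line)
#     # -> ("(Tensor, torch.Tensor)", "Tuple[Tensor, Tensor]")
#
#     # parse_type_line then eval()s each half inside an EvalEnv and converts
#     # the results to JIT IR types via ann_to_type.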
def try_real_annotations(fn, loc):
"""Tries to use the Py3.5+ annotation syntax to get the type."""
try:
# Note: anything annotated as `Optional[T]` will automatically
# be returned as `Union[T, None]` per
# https://github.com/python/typing/blob/master/src/typing.py#L850
sig = inspect.signature(fn)
except ValueError:
return None
all_annots = [sig.return_annotation] + [p.annotation for p in sig.parameters.values()]
if all(ann is sig.empty for ann in all_annots):
return None
arg_types = [ann_to_type(p.annotation, loc)
for p in sig.parameters.values()]
return_type = ann_to_type(sig.return_annotation, loc)
return arg_types, return_type
# Finds common type for enum values belonging to an Enum class. If not all
# values have the same type, AnyType is returned.
def get_enum_value_type(e: Type[enum.Enum], loc):
enum_values: List[enum.Enum] = list(e)
if not enum_values:
raise ValueError(f"No enum values defined for: '{e.__class__}'")
types = {type(v.value) for v in enum_values}
ir_types = [try_ann_to_type(t, loc) for t in types]
# If Enum values are of different types, an exception will be raised here.
    # Even though Python supports this case, we chose not to implement it to
    # avoid overcomplicating the logic here for a rare use case. Please file a
    # feature request if you find it necessary.
res = torch._C.unify_type_list(ir_types)
if not res:
return AnyType.get()
return res
def is_tensor(ann):
if issubclass(ann, torch.Tensor):
return True
if issubclass(ann, (torch.LongTensor, torch.DoubleTensor, torch.FloatTensor,
torch.IntTensor, torch.ShortTensor, torch.HalfTensor,
torch.CharTensor, torch.ByteTensor, torch.BoolTensor)):
warnings.warn("TorchScript will treat type annotations of Tensor "
"dtype-specific subtypes as if they are normal Tensors. "
"dtype constraints are not enforced in compilation either.")
return True
return False
def try_ann_to_type(ann, loc):
if ann is inspect.Signature.empty:
return TensorType.getInferred()
if ann is None:
return NoneType.get()
if inspect.isclass(ann) and is_tensor(ann):
return TensorType.get()
if is_tuple(ann):
# Special case for the empty Tuple type annotation `Tuple[()]`
if len(ann.__args__) == 1 and ann.__args__[0] == ():
return TupleType([])
return TupleType([try_ann_to_type(a, loc) for a in ann.__args__])
if is_list(ann):
elem_type = try_ann_to_type(ann.__args__[0], loc)
if elem_type:
return ListType(elem_type)
if is_dict(ann):
key = try_ann_to_type(ann.__args__[0], loc)
value = try_ann_to_type(ann.__args__[1], loc)
# Raise error if key or value is None
if key is None:
raise ValueError(f"Unknown type annotation: '{ann.__args__[0]}' at {loc.highlight()}")
if value is None:
raise ValueError(f"Unknown type annotation: '{ann.__args__[1]}' at {loc.highlight()}")
return DictType(key, value)
if is_optional(ann):
if issubclass(ann.__args__[1], type(None)):
contained = ann.__args__[0]
else:
contained = ann.__args__[1]
valid_type = try_ann_to_type(contained, loc)
msg = "Unsupported annotation {} could not be resolved because {} could not be resolved."
assert valid_type, msg.format(repr(ann), repr(contained))
return OptionalType(valid_type)
if is_union(ann):
# TODO: this is hack to recognize NumberType
if set(ann.__args__) == set([int, float, complex]):
return NumberType.get()
inner: List = []
# We need these extra checks because both `None` and invalid
# values will return `None`
# TODO: Determine if the other cases need to be fixed as well
for a in ann.__args__:
if a is None:
inner.append(NoneType.get())
maybe_type = try_ann_to_type(a, loc)
msg = "Unsupported annotation {} could not be resolved because {} could not be resolved."
            assert maybe_type, msg.format(repr(ann), repr(a))
inner.append(maybe_type)
return UnionType(inner) # type: ignore[arg-type]
if torch.distributed.rpc.is_available() and is_rref(ann):
return RRefType(try_ann_to_type(ann.__args__[0], loc))
if is_future(ann):
return FutureType(try_ann_to_type(ann.__args__[0], loc))
if ann is float:
return FloatType.get()
if ann is complex:
return ComplexType.get()
if ann is int:
return IntType.get()
if ann is str:
return StringType.get()
if ann is bool:
return BoolType.get()
if ann is Any:
return AnyType.get()
if ann is type(None):
return NoneType.get()
if inspect.isclass(ann) and hasattr(ann, "__torch_script_interface__"):
return InterfaceType(ann.__torch_script_interface__)
if ann is torch.device:
return DeviceObjType.get()
if ann is torch.Stream:
return StreamObjType.get()
if ann is torch.dtype:
return IntType.get() # dtype not yet bound in as its own type
if inspect.isclass(ann) and issubclass(ann, enum.Enum):
if _get_script_class(ann) is None:
scripted_class = torch.jit._script._recursive_compile_class(ann, loc)
name = scripted_class.qualified_name()
else:
name = _qualified_name(ann)
return EnumType(name, get_enum_value_type(ann, loc), list(ann))
if inspect.isclass(ann):
maybe_script_class = _get_script_class(ann)
if maybe_script_class is not None:
return maybe_script_class
if torch._jit_internal.can_compile_class(ann):
return torch.jit._script._recursive_compile_class(ann, loc)
# Maybe resolve a NamedTuple to a Tuple Type
def fake_rcb(key):
return None
return torch._C._resolve_type_from_object(ann, loc, fake_rcb)
def ann_to_type(ann, loc):
the_type = try_ann_to_type(ann, loc)
if the_type is not None:
return the_type
raise ValueError(f"Unknown type annotation: '{ann}' at {loc.highlight()}")
__all__ = [
'Any',
'List',
'BroadcastingList1',
'BroadcastingList2',
'BroadcastingList3',
'Tuple',
'is_tuple',
'is_list',
'Dict',
'is_dict',
'is_optional',
'is_union',
'TensorType',
'TupleType',
'FloatType',
'ComplexType',
'IntType',
'ListType',
'StringType',
'DictType',
'AnyType',
'Module',
# TODO: Consider not exporting these during wildcard import (reserve
# that for the types; for idiomatic typing code.)
'get_signature',
'check_fn',
'get_param_names',
'parse_type_line',
'get_type_line',
'split_type_line',
'try_real_annotations',
'try_ann_to_type',
'ann_to_type',
]
| pytorch-master | torch/jit/annotations.py |
"""
Tools to help with tensor property propagation.
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
from typing import Any, List
import torch
from torch import TensorType
from torch._C import Graph
def apply_input_props_using_example(graph: Graph, example_input: List[Any]):
"""
Applies properties for each tensor in the graph inputs
using the example supplied.
"""
graph_inputs = list(graph.inputs())
if len(graph_inputs) == 0:
return
# Strip self args off for methods
in_0 = graph_inputs[0]
if isinstance(in_0.type(), torch._C.ClassType) and in_0.debugName() == "self":
graph_inputs = graph_inputs[1:]
    if len(graph_inputs) != len(example_input):
raise RuntimeError(
"Number of inputs in graph does not match number of inputs in the example")
for i, (graph_i, example_i) in enumerate(zip(graph_inputs, example_input)):
if example_i is None:
continue # Skip the type check
if isinstance(example_i, torch.Tensor) != isinstance(graph_i.type(), TensorType):
raise RuntimeError(f"Input {i} does not match type of example", graph_i, example_i)
if isinstance(example_i, torch.Tensor):
graph_i.setType(TensorType.create_from_tensor(example_i)) # type: ignore[arg-type]
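if __name__ == "__main__":
    # Hedged usage sketch (not in the original source): seed a scripted function's
    # graph inputs with the shape/dtype of an example tensor, then inspect the graph.
    @torch.jit.script
    def _demo(x):
        return x + 1

    apply_input_props_using_example(_demo.graph, [torch.randn(2, 3)])
    print(_demo.graph)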
| pytorch-master | torch/jit/_passes/_property_propagation.py |
| pytorch-master | torch/jit/_passes/__init__.py |
import torch
from torch.jit._serialization import validate_map_location
import pathlib
import os
from typing import Set
def _load_for_lite_interpreter(f, map_location=None):
r"""
Load a :class:`LiteScriptModule`
saved with :func:`torch.jit._save_for_lite_interpreter`
Args:
f: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
map_location: a string or torch.device used to dynamically remap
storages to an alternative set of devices.
Returns:
A :class:`LiteScriptModule` object.
Example:
.. testcode::
import torch
import io
# Load LiteScriptModule from saved file path
torch.jit._load_for_lite_interpreter('lite_script_module.pt')
# Load LiteScriptModule from io.BytesIO object
with open('lite_script_module.pt', 'rb') as f:
buffer = io.BytesIO(f.read())
# Load all tensors to the original device
torch.jit.mobile._load_for_lite_interpreter(buffer)
"""
if isinstance(f, str):
if not os.path.exists(f):
raise ValueError("The provided filename {} does not exist".format(f))
if os.path.isdir(f):
raise ValueError("The provided filename {} is a directory".format(f))
map_location = validate_map_location(map_location)
if isinstance(f, str) or isinstance(f, pathlib.Path):
cpp_module = torch._C._load_for_lite_interpreter(f, map_location)
else:
cpp_module = torch._C._load_for_lite_interpreter_from_buffer(f.read(), map_location)
return LiteScriptModule(cpp_module)
class LiteScriptModule(object):
def __init__(self, cpp_module):
self._c = cpp_module
super(LiteScriptModule, self).__init__()
def __call__(self, *input):
return self._c.forward(input)
def find_method(self, method_name):
return self._c.find_method(method_name)
def forward(self, *input):
return self._c.forward(input)
def run_method(self, method_name, *input):
return self._c.run_method(method_name, input)
def _export_operator_list(module: LiteScriptModule):
r"""
return a set of root operator names (with overload name) that are used by any method
in this mobile module.
"""
return torch._C._export_operator_list(module._c)
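# Hedged sketch (not in the original source): a typical end-to-end flow - script a
# module, save it for the lite interpreter, load it back and list its root ops.
# The file name and the exact op set shown are illustrative only.
#
#     class MyModule(torch.nn.Module):
#         def forward(self, x):
#             return torch.relu(x)
#
#     scripted = torch.jit.script(MyModule())
#     scripted._save_for_lite_interpreter("my_module.ptl")
#     lite_module = _load_for_lite_interpreter("my_module.ptl")
#     out = lite_module(torch.randn(4))
#     root_ops = _export_operator_list(lite_module)  # e.g. {'aten::relu'}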
def _get_model_bytecode_version(f_input) -> int:
r"""
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
Returns:
version: An integer. If the integer is -1, the version is invalid. A warning
will show in the log.
Example:
.. testcode::
from torch.jit.mobile import _get_model_bytecode_version
# Get bytecode version from a saved file path
version = _get_model_bytecode_version("path/to/model.ptl")
"""
if isinstance(f_input, str):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if (isinstance(f_input, str) or isinstance(f_input, pathlib.Path)):
return torch._C._get_model_bytecode_version(str(f_input))
else:
return torch._C._get_model_bytecode_version_from_buffer(f_input.read())
def _get_mobile_model_contained_types(f_input) -> Set[str]:
r"""
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
Returns:
        type_list: A set of strings, like {"int", "Optional"}. These are the types used in the bytecode.
Example:
.. testcode::
from torch.jit.mobile import _get_mobile_model_contained_types
# Get type list from a saved file path
type_list = _get_mobile_model_contained_types("path/to/model.ptl")
"""
if isinstance(f_input, str):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if (isinstance(f_input, str) or isinstance(f_input, pathlib.Path)):
return torch._C._get_mobile_model_contained_types(str(f_input))
else:
return torch._C._get_mobile_model_contained_types_from_buffer(f_input.read())
def _backport_for_mobile(f_input, f_output, to_version):
r"""
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
f_output: path to new model destination
to_version: the expected output model bytecode version
Returns:
success: A boolean. If backport success, return true, otherwise false
"""
if isinstance(f_input, str):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if ((isinstance(f_input, str) or isinstance(f_input, pathlib.Path)) and (
isinstance(f_output, str) or isinstance(f_output, pathlib.Path))):
return torch._C._backport_for_mobile(str(f_input), str(f_output), to_version)
else:
return torch._C._backport_for_mobile_from_buffer(f_input.read(), str(f_output), to_version)
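# Hedged usage sketch (not in the original source): downgrade a model's bytecode so
# an older runtime can load it. The target version (5) is illustrative only.
#
#     current_version = _get_model_bytecode_version("model.ptl")
#     if current_version > 5:
#         success = _backport_for_mobile("model.ptl", "model_v5.ptl", to_version=5)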
def _backport_for_mobile_to_buffer(f_input, to_version):
r"""
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
"""
if isinstance(f_input, str):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if (isinstance(f_input, str) or isinstance(f_input, pathlib.Path)):
return torch._C._backport_for_mobile_to_buffer(str(f_input), to_version)
else:
return torch._C._backport_for_mobile_from_buffer_to_buffer(f_input.read(), to_version)
def _get_model_ops_and_info(f_input):
r"""
    A function to retrieve the root (top-level) operators of a model and their corresponding
    compatibility info. Root operators can call other operators within them (traced ops), and
    a root op can call many different traced ops depending on the internal code paths taken.
    These traced ops are not returned by this function; they are abstracted into the
    runtime as an implementation detail (and can themselves call further operators), which makes
    retrieving them difficult, and their value in this api is negligible since they differ across
    the runtime versions a model may run on. Because of this, there is a false positive this api
    cannot prevent in a compatibility use case: all the root ops of a model may be present in a
    target runtime while some of its traced ops are not, which still prevents the model from running.
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
Returns:
Operators and info: A Dictionary mapping strings (the qualified names of the root operators)
of the model to their OperatorInfo structs.
Example:
.. testcode::
from torch.jit.mobile import _get_model_ops_and_info
# Get bytecode version from a saved file path
ops_and_info = _get_model_ops_and_info("path/to/model.ptl")
"""
if isinstance(f_input, str):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if (isinstance(f_input, str) or isinstance(f_input, pathlib.Path)):
return torch._C._get_model_ops_and_info(str(f_input))
else:
return torch._C._get_model_ops_and_info(f_input.read())
| pytorch-master | torch/jit/mobile/__init__.py |
import torch._C._lazy
def reset():
"""Resets all metric counters."""
torch._C._lazy._reset_metrics()
def counter_names():
"""Retrieves all the currently active counter names."""
return torch._C._lazy._counter_names()
def counter_value(name: str):
"""Return the value of the counter with the speficied name"""
return torch._C._lazy._counter_value(name)
def metrics_report():
"""Return the combined (lazy core and backend) metric report"""
return torch._C._lazy._metrics_report()
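# Hedged usage sketch (not in the original source): dump all lazy-tensor counters
# after some lazily traced work (assumes a lazy backend has been initialized).
#
#     for name in counter_names():
#         print(name, counter_value(name))
#     print(metrics_report())
#     reset()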
| pytorch-master | torch/_lazy/metrics.py |
import torch._C._lazy
def get_force_fallback():
"""Get the config used to force LTC fallback"""
return torch._C._lazy._get_force_fallback()
def set_force_fallback(configval):
"""Set the config used to force LTC fallback"""
torch._C._lazy._set_force_fallback(configval)
def set_reuse_ir(val: bool):
"""Set the config to reuse IR nodes for faster tracing"""
torch._C._lazy._set_reuse_ir(val)
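# Hedged usage sketch (not in the original source): typical toggling of these knobs.
# The format of the fallback config string is backend-defined; "" clears it.
#
#     set_reuse_ir(True)
#     set_force_fallback("")
#     print(get_force_fallback())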
| pytorch-master | torch/_lazy/config.py |
import torch._C._lazy
def mark_step(device: str = "", wait=False):
"""Triggers a mark step, which amounts to
- collecting a group of 'live' lazy tensors to index into the compilation cache
(lowering/compiling their IR graphs if not cached)
- kicking off execution of the compiled function
- (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator)
"""
# TODO(whc) expand this to include backend hooks and align with XLA backend needs
torch._C._lazy._mark_step(device, [], wait=wait)
def wait_device_ops(devices=None):
"""Waits for all the async operations on the given devices to complete.
Args:
devices (string..., optional): The devices whose async ops need to be waited
for. If empty, all the local devices will be waited for.
"""
if devices is None:
devices = []
torch._C._lazy._wait_device_ops(devices=devices)
def sync_multi(tensors, devices):
"""
    Sync the list of lazy tensors so their IR gets lowered for the active backend
    and the compiled computation graph gets cached.
"""
torch._C._lazy._sync_multi(tensors, devices)
def get_tensor_id(tensor):
"""Return a unique id of the lazy tensor maintained by LTC"""
return torch._C._lazy._get_tensor_id(tensor)
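if __name__ == "__main__":
    # Hedged usage sketch (not in the original source): a minimal lazy-tensor step,
    # assuming the TorchScript-based lazy backend is available and initialized.
    import torch._lazy.ts_backend
    torch._lazy.ts_backend.init()
    x = torch.randn(2, 2, device="lazy")
    y = x + 1       # traced lazily; nothing executes yet
    mark_step()     # lower and run the accumulated graph
    print(y.cpu())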
| pytorch-master | torch/_lazy/__init__.py |
import torch
"""
tensor_factory_functions defines the list of torch functions that create tensors.
The list is generated by searching through native_functions.yaml with the following
regular expression:
cat native_functions.yaml | grep 'func:' | grep -v "Tensor.*->" | grep "[-]>.*Tensor"
It's possible that new tensor factory functions have been added since, making this list stale.
Use at your own risk or regenerate the list.
"""
tensor_factory_functions = (
torch._cudnn_init_dropout_state,
torch.arange,
torch.bartlett_window,
torch.blackman_window,
torch._empty_affine_quantized,
torch.empty_strided,
torch.eye,
torch.full,
torch.from_file,
torch.hann_window,
torch.hamming_window,
torch.kaiser_window,
torch.linspace,
torch.logspace,
torch.ones,
torch.scalar_tensor,
torch.rand,
torch.randint,
torch.randn,
torch.randperm,
torch.range,
torch._efficientzerotensor,
torch.zeros,
torch.tril_indices,
torch.triu_indices,
# Note: the following functions match the regular expression search above but
# they are not available in the torch module. Comment out.
# torch._sparse_coo_tensor_with_dims,
# torch.fft_fftfreq,
# torch.fft_rfftfreq,
) + (
# torch.tensor is special since it's not in native_functions.yaml
# add it separately
torch.tensor,
)
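# Hedged sketch (not in the original source): how this list is typically consumed -
# checking whether an intercepted callable is a tensor factory (for example, to
# rewrite its `device=` argument). The helper below is illustrative only.
def _is_tensor_factory(fn) -> bool:
    return fn in tensor_factory_functions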
| pytorch-master | torch/_lazy/tensor_factory_functions.py |