diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab8fc11b2b91721b30f63bde9bc3c96b291090b0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_nn.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_nn.py new file mode 100644 index 0000000000000000000000000000000000000000..b750f1c80dc5e0f15a48063079706a3e5427c070 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_nn.py @@ -0,0 +1,4903 @@ +from abc import abstractmethod +import tempfile +import unittest + +from copy import deepcopy +from functools import reduce, partial, wraps +from itertools import product +from operator import mul +from math import pi + + +import torch +import torch.cuda +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import _reduction as _Reduction +from torch.testing._internal.common_utils import TestCase, to_gpu, freeze_rng_state, is_iterable, \ + gradcheck, gradgradcheck, set_default_dtype +from torch.testing._internal.common_cuda import TEST_CUDA, SM90OrLater +from torch.autograd.gradcheck import _get_numerical_jacobian, _iter_tensors +from torch.autograd import Variable +from torch.types import _TensorOrTensors +import torch.backends.cudnn + +from typing import Dict, Callable, Tuple, List, Sequence, Union, Any + +TemporaryFile = tempfile.TemporaryFile +PRECISION = 1e-5 + + +def get_reduction(m): + result = getattr(m, 'reduction', None) + if result is None: + result = _Reduction.legacy_get_string(getattr(m, 'sizeAverage', None), True, emit_warning=False) + assert result is not None + return result + + +def get_weight(m): + result = getattr(m, 'weight', None) + if result is not None: + return result + return getattr(m, 'weights', None) + +# NOTE [How to check NN module / functional API parity between Python and C++ frontends] +# +# The way to check API parity is to add parity tests for the NN module / functional of interest. +# Here are the detailed steps: +# +# For NN module: +# 1. Make sure you already have a test dict with the module configuration you want to test. +# 2. Add `cpp_constructor_args` entry to the test dict, with its value exactly matching +# the Python module constructor arguments. For example, if in the test dict we pass +# `(10, 8)` to `torch.nn.Linear` constructor, then we should pass `torch::nn::LinearOptions(10, 8)` +# as the corresponding C++ constructor argument to `torch::nn::Linear`. +# 3. If in the process of performing the above step you referenced any variables +# in the `cpp_constructor_args` entry, you must add `cpp_var_map` entry +# to the test dict to make sure that those variables are populated with the right Python values. 
+# For example, if the Python constructor call is
+# `torch.nn.FractionalMaxPool2d(2, output_ratio=0.5, _random_samples=random_samples)`,
+# the corresponding C++ constructor argument is
+# `torch::nn::FractionalMaxPool2dOptions(2).output_ratio(0.5)._random_samples(random_samples)`,
+# and the `cpp_var_map` entry must be
+# `{'random_samples': random_samples}` in order to populate the C++ variable `random_samples`
+# used in the C++ constructor argument with the Python tensor value `random_samples`.
+#
+# For NN functional:
+# 1. Make sure you already have a test dict with the functional configuration you want to test.
+# 2. If the test dict's `constructor` entry looks like `wrap_functional(F.some_functional_name, ...)`,
+# then you must add a `cpp_options_args` entry to the test dict, with its value exactly matching the Python
+# functional optional arguments. For example, if the test dict's `constructor` entry is
+# `wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest')`,
+# then the `cpp_options_args` entry should be
+# "F::InterpolateFuncOptions().size(std::vector<int64_t>({12})).scale_factor(c10::nullopt).mode(torch::kNearest)".
+# 3. Otherwise, if the test dict's `constructor` entry looks like
+# `wrap_functional(lambda i: F.some_functional_name(...))`,
+# then you must add a `cpp_function_call` entry to the test dict, with its value exactly matching the Python
+# functional function call. For example, if the test dict's `constructor` entry is
+# `wrap_functional(lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none'))`,
+# then the `cpp_function_call` entry should be
+# "F::poisson_nll_loss(i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))".
+# 4. If in the process of performing the above two steps you referenced any variables
+# in the `cpp_options_args` or `cpp_function_call` entry, you must add a `cpp_var_map` entry
+# to the test dict to make sure that those variables are populated with the right Python values.
+# In the `F.poisson_nll_loss` example from step 3, the `cpp_function_call` entry references two
+# variables `i` and `t` whose values need to be provided, and the way to do so is to add a
+# `cpp_var_map` entry: `cpp_var_map={'i': '_get_input()', 't': t}`.
+# (Note that for `i`, since we want it to take the Python input value, we pass the '_get_input()' string as the value,
+# and the C++ parity test mechanism will populate `i` with the Python input value correctly.)
+#
+# There are also a few optional flags in the test dict to control the C++ parity test behavior:
+#
+# - `test_cpp_api_parity`: if `False`, skips the C++ parity test for this test dict. Default: True.
+# - `has_parity`: if `False`, expects this test dict to fail the C++ parity test. Default: True.
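+
+# The helper below is a purely illustrative sketch that ties the conventions above together.
+# It is not an entry in `module_tests` or `new_module_tests` and is never collected by the
+# test harness; the helper name, the `fullname` value, and the concrete sizes are assumptions
+# chosen to mirror the FractionalMaxPool2d example in the note above.
+def _example_cpp_parity_test_dict():
+    # Hypothetical example only: shows how `cpp_constructor_args` mirrors the Python
+    # constructor call and how `cpp_var_map` supplies the variable it references.
+    # For a (1, 3, 5, 7) input, `_random_samples` has shape (N, C, 2) = (1, 3, 2).
+    random_samples = torch.empty(1, 3, 2, dtype=torch.double).uniform_()
+    return dict(
+        fullname='FractionalMaxPool2d_parity_example',
+        constructor=lambda: nn.FractionalMaxPool2d(
+            2, output_ratio=0.5, _random_samples=random_samples),
+        # Must exactly mirror the Python constructor call above.
+        cpp_constructor_args='''torch::nn::FractionalMaxPool2dOptions(2)
+                                .output_ratio(0.5)
+                                ._random_samples(random_samples)''',
+        # Populates the C++ variable `random_samples` with the Python tensor above.
+        cpp_var_map={'random_samples': random_samples},
+        input_size=(1, 3, 5, 7),
+    )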
+ + +module_tests = [ + dict( + module_name='Linear', + constructor_args=(10, 8), + cpp_constructor_args='torch::nn::LinearOptions(10, 8)', + input_size=(4, 10), + reference_fn=lambda i, p, _: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Linear', + constructor_args=(10, 8, False), + cpp_constructor_args='torch::nn::LinearOptions(10, 8).bias(false)', + input_size=(4, 10), + desc='no_bias', + reference_fn=lambda i, p, _: torch.mm(i, p[0].t()), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='RReLU', + input_size=(1, 2, 2), + test_cuda=False, + default_dtype=torch.double, + ), + dict( + module_name='RReLU', + constructor_args=(0.1, 0.9), + cpp_constructor_args='torch::nn::RReLUOptions().lower(0.1).upper(0.9)', + input_size=(4, 4, 5), + desc='with_up_down', + test_cuda=False, + default_dtype=torch.double, + ), + dict( + module_name='Flatten', + input_size=(2, 3, 4, 5), + reference_fn=lambda i, *_: torch.flatten(i, 1), + default_dtype=torch.double, + ), + # TODO: reference function + dict( + module_name='CrossMapLRN2d', + constructor_args=(5, 5e-3, 1e-3, 2), + cpp_constructor_args='torch::nn::CrossMapLRN2dOptions(5).alpha(5e-3).beta(1e-3).k(2)', + input_size=(2, 3, 6, 6), + check_gradgrad=False, + # TODO(#50743): Figure out the error. "RuntimeError: Unrecognized tensor type ID: Batched" + check_batched_grad=False, + default_dtype=torch.double, + ), +] + + +# Generates rand tensor with non-equal values. This ensures that duplicate +# values won't be causing test failure for modules like MaxPooling. +# size should be small, otherwise randperm fails / long overflows. +def _rand_tensor_non_equal(*size): + total = reduce(mul, size, 1) + return torch.randperm(total).view(*size).double() + + +def wrap_functional(fn, **kwargs): + class FunctionalModule(nn.Module): + def forward(self, *args): + return fn(*args, **kwargs) + return FunctionalModule + + +def poissonnllloss_no_reduce_test(): + t = torch.randn(10, 10) + return dict( + fullname='PoissonNLLLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='F::poisson_nll_loss(' + 'i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(10, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: i.exp() - t.mul(i), + pickle=False, + default_dtype=torch.double) + + +def bceloss_no_reduce_test(): + t = Variable(torch.randn(15, 10).gt(0).to(torch.double)) + return dict( + fullname='BCELoss_no_reduce', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')), + cpp_function_call='F::binary_cross_entropy(' + 'i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()), + pickle=False, + precision=7e-4, + default_dtype=torch.double) + + +def bceloss_no_reduce_scalar_test(): + t = torch.randn(()).gt(0).to(torch.double) + return dict( + fullname='BCELoss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')), + cpp_function_call='F::binary_cross_entropy(' + 'i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))', 
+ input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()), + pickle=False, + default_dtype=torch.double) + + +def bceloss_weights_no_reduce_test(): + t = Variable(torch.randn(15, 10, dtype=torch.double).gt(0).to(torch.double)) + weights = torch.rand(10, dtype=torch.double) + return dict( + fullname='BCELoss_weights_no_reduce', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy(i, t.type_as(i), + weight=weights.type_as(i), reduction='none')), + cpp_function_call='F::binary_cross_entropy(' + 'i, t.to(i.options()), ' + 'F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))', + input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, + reference_fn=lambda i, p, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights, + pickle=False, + precision=3e-4, + default_dtype=torch.double, + ) + + +def bceloss_weights_no_reduce_scalar_test(): + t = torch.randn(()).gt(0).to(torch.double) + weights = torch.rand((), dtype=torch.double) + return dict( + fullname='BCELoss_weights_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy(i, t.type_as(i), + weight=weights.type_as(i), reduction='none')), + cpp_function_call='''F::binary_cross_entropy( + i, t.to(i.options()), + F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''', + cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, + input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2), + reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()) * weights, + pickle=False, + default_dtype=torch.double, + ) + + +def bce_with_logistic_legacy_enum_test(): + t = Variable(torch.randn(15, 10).gt(0).to(torch.double)) + sigmoid = nn.Sigmoid() + return dict( + fullname='BCEWithLogitsLoss_legacy_enum', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduce=False)), + cpp_function_call='''F::binary_cross_entropy_with_logits( + i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()), + check_gradgrad=False, + pickle=False, + default_dtype=torch.double, + ) + + +def bce_with_logistic_no_reduce_test(): + t = Variable(torch.randn(15, 10).gt(0).to(torch.double)) + sigmoid = nn.Sigmoid() + return dict( + fullname='BCEWithLogitsLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::binary_cross_entropy_with_logits( + i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()), + check_gradgrad=False, + pickle=False, + default_dtype=torch.double, + ) + + +def bce_with_logistic_no_reduce_scalar_test(): + t = torch.randn(()).gt(0).to(torch.double) + sigmoid = nn.Sigmoid() + return dict( + fullname='BCEWithLogitsLoss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), 
reduction='none')), + cpp_function_call='''F::binary_cross_entropy_with_logits( + i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()), + check_gradgrad=False, + pickle=False, + default_dtype=torch.double, + ) + + +def kldivloss_with_target_no_reduce_test(): + t = torch.rand(10, 10, dtype=torch.double) + return dict( + fullname='KLDivLoss_with_target_no_reduce', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none')), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(10, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def kldivloss_no_reduce_test(): + t = torch.rand(10, 10, dtype=torch.double) + return dict( + fullname='KLDivLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none')), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(10, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double, + ) + + +def kldivloss_no_reduce_scalar_test(): + t = torch.rand((), dtype=torch.double) + return dict( + fullname='KLDivLoss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none')), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.rand(()).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def kldivloss_with_log_target_no_reduce_test(): + t = torch.rand(10, 10, dtype=torch.double).log() + return dict( + fullname='KLDivLoss_with_log_target_no_reduce', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))', + input_fn=lambda: torch.rand(10, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def kldivloss_no_reduce_log_target_test(): + t = torch.rand(10, 10, dtype=torch.double).log() + return dict( + fullname='KLDivLoss_no_reduce_log_target', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))', + input_fn=lambda: torch.rand(10, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + 
default_dtype=torch.double, + ) + + +def kldivloss_no_reduce_scalar_log_target_test(): + t = torch.rand((), dtype=torch.double).log() + return dict( + fullname='KLDivLoss_no_reduce_scalar_log_target', + constructor=wrap_functional( + lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)), + cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))', + input_fn=lambda: torch.rand(()).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def l1loss_no_reduce_test(): + t = torch.randn(2, 3, 4, dtype=torch.double) + return dict( + fullname='L1Loss_no_reduce', + constructor=wrap_functional( + lambda i: F.l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: (i - t.type_as(i)).abs(), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def l1loss_no_reduce_complex_test(): + t = torch.randn(2, 3, 4, dtype=torch.cdouble) + return dict( + fullname='L1Loss_no_reduce_complex', + constructor=wrap_functional( + lambda i: F.l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.randn(2, 3, 4, dtype=torch.cdouble), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: (i - t.type_as(i)).abs(), + supports_forward_ad=True, + pickle=False) + + +def l1loss_no_reduce_scalar_test(): + t = torch.randn((), dtype=torch.double) + return dict( + fullname='L1Loss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))', + input_fn=lambda: torch.randn(()), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: (i - t.type_as(i)).abs(), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def mseloss_no_reduce_test(): + input_size = (2, 3, 4, 5) + target = torch.randn(*input_size, dtype=torch.double) + return dict( + fullname='MSELoss_no_reduce', + constructor=wrap_functional( + lambda i: F.mse_loss(i, target.type_as(i), reduction='none')), + cpp_function_call='F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))', + input_size=input_size, + cpp_var_map={'i': '_get_input()', 'target': target}, + reference_fn=lambda i, *_: (i - target).pow(2), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def mseloss_no_reduce_scalar_test(): + input_size = () + target = torch.randn(input_size, dtype=torch.double) + return dict( + fullname='MSELoss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.mse_loss(i, target.type_as(i), reduction='none')), + cpp_function_call='F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))', + input_size=input_size, + cpp_var_map={'i': '_get_input()', 'target': target}, + reference_fn=lambda i, *_: (i - target).pow(2), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_test(): + t = 
Variable(torch.empty(15).uniform_().mul(10).floor().long()) + kwargs = {'reduction': 'none'} + return dict( + fullname='NLLLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_ignore_index_test(): + t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) + kwargs: Dict[str, Union[int, str]] = {'ignore_index': 2, 'reduction': 'none'} + return dict( + fullname='NLLLoss_no_reduce_ignore_index', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']), + reduction=str(kwargs['reduction']))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(2).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_weights_test(): + t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) + weight = torch.rand(10) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none'} + + return dict( + fullname='NLLLoss_no_reduce_weights', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(15, 10).add(1e-2).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_weights_ignore_index_test(): + t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) + weight = torch.rand(10) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none', + 'ignore_index': 2} + + return dict( + fullname='NLLLoss_no_reduce_weights_ignore_index', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i.data))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(2))''', + input_fn=lambda: torch.rand(15, 10).add(1e-2).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def nllloss_no_reduce_weights_ignore_index_neg_test(): + t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) + weight = torch.rand(10) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none', + 'ignore_index': -1} + + return dict( + fullname='NLLLoss_no_reduce_weights_ignore_index_neg', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + 
F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(-1))''', + input=torch.rand(15, 10, dtype=torch.double).add(1e-2).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def nllloss2d_no_reduce_test(): + t = Variable(torch.rand(2, 5, 5).mul(3).floor().long()) + kwargs = {'reduction': 'none'} + return dict( + fullname='NLLLoss2d_no_reduce', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nllloss2d_no_reduce_ignore_index_test(): + t = Variable(torch.rand(2, 5, 5).mul(3).floor().long()) + kwargs: Dict[str, Union[int, str]] = {'ignore_index': 1, 'reduction': 'none'} + return dict( + fullname='NLLLoss2d_no_reduce_ignore_index', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']), + reduction=str(kwargs['reduction']))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nllloss2d_no_reduce_weights_test(): + t = Variable(torch.rand(2, 5, 5).mul(3).floor().long()) + weight = torch.rand(3) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none'} + + return dict( + fullname='NLLLoss2d_no_reduce_weights', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def nlllossNd_no_reduce_test(): + t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long()) + kwargs = {'reduction': 'none'} + return dict( + fullname='NLLLossNd_no_reduce', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nlllossNd_no_reduce_ignore_index_test(): + t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long()) + kwargs: Dict[str, Union[int, str]] = {'ignore_index': 1, 'reduction': 'none'} + return dict( + fullname='NLLLossNd_no_reduce_ignore_index', + 
constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']), + reduction=str(kwargs['reduction']))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), + pickle=False, + default_dtype=torch.double) + + +def nlllossNd_no_reduce_weights_test(): + t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long()) + weight = torch.rand(3) + + def kwargs(i): + return {'weight': weight.type_as(i), 'reduction': 'none'} + + return dict( + fullname='NLLLossNd_no_reduce_weights', + constructor=wrap_functional( + lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), + cpp_function_call='''F::nll_loss( + i, t.to(i.options()).to(torch::kLong), + F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(), + cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, + reference_fn=lambda i, *_: + loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)), + pickle=False, + default_dtype=torch.double) + + +def smoothl1loss_no_reduce_test(): + t = torch.randn(2, 3, 4, dtype=torch.double) + return dict( + fullname='SmoothL1Loss_no_reduce', + constructor=wrap_functional( + lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::smooth_l1_loss( + i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def smoothl1loss_no_reduce_scalar_test(): + t = torch.randn((), dtype=torch.double) + return dict( + fullname='SmoothL1Loss_no_reduce_scalar', + constructor=wrap_functional( + lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::smooth_l1_loss( + i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(()), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def smoothl1loss_beta_test(): + t = torch.randn(2, 3, 4, dtype=torch.double) + return dict( + fullname='SmoothL1Loss_beta', + constructor=wrap_functional( + lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none', beta=0.5)), + cpp_function_call='''F::smooth_l1_loss( + i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone), 0.5)''', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none', beta=0.5), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def smoothl1loss_zero_beta_test(): + t = torch.randn(2, 3, 4, dtype=torch.double) + return dict( + fullname='SmoothL1Loss_zero_beta', + constructor=wrap_functional( + lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none', beta=0)), + cpp_function_call='''F::smooth_l1_loss( + i, t.to(i.options()), 
F::SmoothL1LossFuncOptions().reduction(torch::kNone), 0)''', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none', beta=0), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def huberloss_delta_test(): + t = torch.randn(2, 3, 4) + return dict( + fullname='HuberLoss_delta', + constructor=wrap_functional( + lambda i: F.huber_loss(i, t.type_as(i), reduction='none', delta=0.5)), + cpp_function_call='''F::huber_loss( + i, t.to(i.options()), F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5))''', + input_fn=lambda: torch.randn(2, 3, 4), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['HuberLoss'](i, t.type_as(i), reduction='none', delta=0.5), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def multilabelmarginloss_0d_no_reduce_test(): + t = torch.zeros(()).long() + return dict( + fullname='MultiLabelMarginLoss_0d_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multilabel_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(()), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False) + + +def multilabelmarginloss_1d_no_reduce_test(): + t = Variable(torch.rand(10).mul(10).floor().long()) + return dict( + fullname='MultiLabelMarginLoss_1d_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multilabel_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multilabelmarginloss_index_neg_test(): + t = Variable(torch.clamp(torch.rand(5, 10).add(-.5).mul(20).floor().long(), min=-1)) + return dict( + fullname='MultiLabelMarginLoss_index_neg', + constructor=wrap_functional( + lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multilabel_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multilabelmarginloss_no_reduce_test(): + t = Variable(torch.rand(5, 10).mul(10).floor().long()) + return dict( + fullname='MultiLabelMarginLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multilabel_margin_loss( + i, t.to(i.options()).to(torch::kLong), 
F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def hingeembeddingloss_no_reduce_test(): + t = Variable(torch.randn(10).gt(0).to(torch.double).mul_(2).sub(1)) + return dict( + fullname='HingeEmbeddingLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.hinge_embedding_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::hinge_embedding_loss( + i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), reduction='none'), + check_sum_reduction=True, + pickle=False, + default_dtype=torch.double) + + +def hingeembeddingloss_margin_no_reduce_test(): + t = Variable(torch.randn(10).gt(0).to(torch.double).mul_(2).sub(1)) + return dict( + fullname='HingeEmbeddingLoss_margin_no_reduce', + constructor=wrap_functional( + lambda i: F.hinge_embedding_loss(i, t.type_as(i), margin=0.5, reduction='none')), + cpp_function_call='''F::hinge_embedding_loss( + i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().margin(0.5).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), margin=0.5, reduction='none'), + check_sum_reduction=True, + pickle=False, + default_dtype=torch.double) + + +def softmarginloss_no_reduce_test(): + t = torch.randn(5, 5, dtype=torch.double) + return dict( + fullname='SoftMarginLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.soft_margin_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::soft_margin_loss( + i, t.to(i.options()), F::SoftMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 5), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['SoftMarginLoss'](i, t.type_as(i), reduction='none'), + supports_forward_ad=True, + pickle=False, + default_dtype=torch.double) + + +def multilabelsoftmarginloss_no_reduce_test(): + t = torch.rand(5, 10).mul(2).floor() + return dict( + fullname='MultiLabelSoftMarginLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i), reduction='none')), + cpp_function_call='''F::multilabel_soft_margin_loss( + i, t.to(i.options()), F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + (-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log())).sum(dim=1) / i.size(1), + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multilabelsoftmarginloss_weights_no_reduce_test(): + t = torch.rand(5, 10).mul(2).floor() + weights = torch.rand(10) + return dict( + fullname='MultiLabelSoftMarginLoss_weights_no_reduce', + constructor=wrap_functional( + lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i), + weight=weights.type_as(i), reduction='none')), + cpp_function_call='''F::multilabel_soft_margin_loss( + i, t.to(i.options()), + 
F::MultilabelSoftMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, + reference_fn=lambda i, *_: + (-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * weights).sum(dim=1) / i.size(1), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_no_reduce_test(): + t = torch.rand(5).mul(8).floor().long() + return dict( + fullname='MultiMarginLoss_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_1d_no_reduce_test(): + t = torch.rand(1).mul(8).floor().long() + return dict( + fullname='MultiMarginLoss_1d_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_1d_input_0d_target_no_reduce_test(): + t = torch.rand(()).mul(8).floor().long() + return dict( + fullname='multimarginloss_1d_input_0d_target_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''', + input_fn=lambda: torch.randn(10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_p_no_reduce_test(): + t = torch.rand(5).mul(8).floor().long() + return dict( + fullname='MultiMarginLoss_p_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), p=2, reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().p(2).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10).clamp_(1e-2, 1 - 1e-2), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), p=2, reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_margin_no_reduce_test(): + t = torch.rand(5).mul(8).floor().long() + return dict( + fullname='MultiMarginLoss_margin_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), margin=0.5, reduction='none')), 
+ cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), + F::MultiMarginLossFuncOptions().margin(0.5).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), + margin=0.5, reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def multimarginloss_weights_no_reduce_test(): + t = torch.rand(5).mul(8).floor().long() + weights = torch.rand(10, dtype=torch.double) + return dict( + fullname='MultiMarginLoss_weights_no_reduce', + constructor=wrap_functional( + lambda i: F.multi_margin_loss(i, t.type_as(i).long(), weight=weights.type_as(i), + reduction='none')), + cpp_function_call='''F::multi_margin_loss( + i, t.to(i.options()).to(torch::kLong), + F::MultiMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''', + input_fn=lambda: torch.randn(5, 10), + cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, + reference_fn=lambda i, *_: + loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), + weight=weights, reduction='none'), + check_sum_reduction=True, + check_gradgrad=False, + pickle=False, + default_dtype=torch.double) + + +def single_batch_reference_fn(input, parameters, module): + """Reference function for modules supporting no batch dimensions. + + The module is passed the input and target in batched form with a single item. + The output is squeezed to compare with the no-batch input. + """ + def unsqueeze_inp(inp): + if isinstance(inp, (list, tuple)): + return [t.unsqueeze(0) for t in inp] + return inp.unsqueeze(0) + + single_batch_input = unsqueeze_inp(input) + single_batch_input = [single_batch_input] if isinstance(single_batch_input, torch.Tensor) else single_batch_input + with freeze_rng_state(): + return module(*single_batch_input).squeeze(0) + + +new_module_tests = [ + poissonnllloss_no_reduce_test(), + bceloss_no_reduce_test(), + bceloss_weights_no_reduce_test(), + bce_with_logistic_legacy_enum_test(), + bce_with_logistic_no_reduce_test(), + bceloss_no_reduce_scalar_test(), + bceloss_weights_no_reduce_scalar_test(), + bce_with_logistic_no_reduce_scalar_test(), + kldivloss_with_target_no_reduce_test(), + kldivloss_no_reduce_test(), + kldivloss_no_reduce_scalar_test(), + kldivloss_with_log_target_no_reduce_test(), + kldivloss_no_reduce_log_target_test(), + kldivloss_no_reduce_scalar_log_target_test(), + l1loss_no_reduce_test(), + l1loss_no_reduce_complex_test(), + l1loss_no_reduce_scalar_test(), + mseloss_no_reduce_test(), + mseloss_no_reduce_scalar_test(), + nllloss_no_reduce_test(), + nllloss_no_reduce_ignore_index_test(), + nllloss_no_reduce_weights_test(), + nllloss_no_reduce_weights_ignore_index_test(), + nllloss_no_reduce_weights_ignore_index_neg_test(), + nllloss2d_no_reduce_test(), + nllloss2d_no_reduce_weights_test(), + nllloss2d_no_reduce_ignore_index_test(), + nlllossNd_no_reduce_test(), + nlllossNd_no_reduce_weights_test(), + nlllossNd_no_reduce_ignore_index_test(), + smoothl1loss_no_reduce_test(), + smoothl1loss_no_reduce_scalar_test(), + smoothl1loss_beta_test(), + smoothl1loss_zero_beta_test(), + huberloss_delta_test(), + multilabelmarginloss_0d_no_reduce_test(), + multilabelmarginloss_1d_no_reduce_test(), + multilabelmarginloss_index_neg_test(), + multilabelmarginloss_no_reduce_test(), + hingeembeddingloss_no_reduce_test(), + hingeembeddingloss_margin_no_reduce_test(), + 
softmarginloss_no_reduce_test(), + multilabelsoftmarginloss_no_reduce_test(), + multilabelsoftmarginloss_weights_no_reduce_test(), + multimarginloss_no_reduce_test(), + multimarginloss_1d_no_reduce_test(), + multimarginloss_1d_input_0d_target_no_reduce_test(), + multimarginloss_p_no_reduce_test(), + multimarginloss_margin_no_reduce_test(), + multimarginloss_weights_no_reduce_test(), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 3), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 3, 2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).stride(2)', + input_size=(2, 4, 10), + cudnn=True, + desc='stride', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 3, 1, 1), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).stride(1).padding(1)', + input_size=(2, 4, 10), + cudnn=True, + desc='pad1', + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 5, 1, 2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 5).stride(1).padding(2)', + input_size=(2, 4, 10), + cudnn=True, + desc='pad2', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 4, 3, 1, 1), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 4, 3).stride(1).padding(1)', + input_size=(1, 4, 1), + cudnn=True, + desc='pad1size1', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 4, 5, 1, 2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 4, 5).stride(1).padding(2)', + input_size=(1, 4, 1), + cudnn=True, + desc='pad2size1', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv1d', + constructor_args=(4, 5, 3), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3)', + input_size=(0, 4, 10), + cudnn=True, + desc='zero_batch', + with_tf32=True, + tf32_precision=0.005, + ), + dict( + fullname='Conv1d_dilated', + constructor=lambda: nn.Conv1d(4, 5, kernel_size=3, dilation=2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).dilation(2)', + input_size=(2, 4, 10), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_groups', + constructor=lambda: nn.Conv1d(4, 6, kernel_size=3, groups=2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 6, 3).groups(2)', + input_size=(2, 4, 6), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_pad_valid', + constructor=lambda: nn.Conv1d(4, 5, 3, padding="valid"), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kValid)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_pad_same', + constructor=lambda: nn.Conv1d(4, 5, 3, padding="same"), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kSame)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_pad_same2', + constructor=lambda: nn.Conv1d(4, 5, 4, padding="same"), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 
4).padding(torch::kSame)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv1d_pad_same_dilated', + constructor=lambda: nn.Conv1d(4, 5, 4, padding="same", dilation=2), + cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kSame).dilation(2)', + input_size=(2, 4, 10), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='ConvTranspose1d', + constructor=lambda: nn.ConvTranspose1d(3, 4, kernel_size=3, stride=(3,), padding=1, output_padding=(1,)), + cpp_constructor_args='torch::nn::ConvTranspose1dOptions(3, 4, 3).stride(3).padding(1).output_padding(1)', + cudnn=True, + input_size=(1, 3, 7), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose1d', + constructor_args=(3, 4, 3, 2, 1, 1, 1, False), + cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(3, 4, 3) + .stride(2).padding(1).output_padding(1).groups(1).bias(false)''', + input_size=(1, 3, 6), + cudnn=True, + desc='no_bias', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose1d', + constructor_args=(3, 4, 3, 2, 1, 1, 1, True, 2), + cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(3, 4, 3) + .stride(2).padding(1).output_padding(1).groups(1).bias(true).dilation(2)''', + input_size=(1, 3, 6), + cudnn=True, + desc='dilated', + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='ConvTranspose1d_groups', + constructor=lambda: nn.ConvTranspose1d(4, 6, 3, stride=(3,), padding=1, output_padding=(1,), groups=2), + cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(4, 6, 3) + .stride(3).padding(1).output_padding(1).groups(2)''', + cudnn=True, + input_size=(2, 4, 7), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 2)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 2})', + input_size=(2, 3, 7, 5), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 3), (2, 2)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2})', + input_size=(2, 3, 6, 6), + cudnn=True, + desc='strided', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 3), (2, 2), (1, 1)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2}).padding({1, 1})', + input_size=(2, 3, 6, 6), + cudnn=True, + desc='padding', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 2, (3, 3), (2, 2), (1, 1), (2, 2)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 2, {3, 3}).stride({2, 2}).padding({1, 1}).dilation({2, 2})', + input_size=(2, 3, 8, 8), + cudnn=True, + desc='dilated', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 2), 1, 0, 1, 1, False), + cpp_constructor_args='''torch::nn::Conv2dOptions(3, 4, {3, 2}) + .stride(1).padding(0).dilation(1).groups(1).bias(false)''', + input_size=(2, 3, 6, 5), + cudnn=True, + desc='no_bias', 
+ check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.015, + default_dtype=torch.double, + ), + dict( + module_name='Conv2d', + constructor_args=(3, 4, (3, 2)), + cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 2})', + input_size=(0, 3, 7, 5), + cudnn=True, + desc='zero_batch', + check_with_long_tensor=True, + with_tf32=True, + ), + dict( + fullname='Conv2d_groups', + constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)', + input_size=(2, 4, 6, 5), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.015, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_groups_thnn', + constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)', + input_size=(2, 4, 6, 5), + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.015, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_pad_valid', + constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="valid"), + cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kValid)', + input_size=(2, 2, 6, 5), + cudnn=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_pad_same', + constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="same"), + cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kSame)', + input_size=(2, 2, 6, 5), + cudnn=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_pad_same_dilated', + constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="same", dilation=2), + cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kSame).dilation(2)', + input_size=(2, 2, 6, 5), + cudnn=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose2d', + constructor_args=(3, 4, 3, (3, 2), 1, (1, 1)), + cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3) + .stride({3, 2}).padding(1).output_padding({1, 1})''', + cudnn=True, + input_size=(1, 3, 7, 6), + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose2d', + constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False, (2, 2)), + cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3) + .stride({2, 3}) + .padding(1) + .output_padding({1, 1}) + .groups(1) + .bias(false) + .dilation({2, 2})''', + input_size=(1, 3, 6, 7), + cudnn=True, + desc='dilated', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose2d', + constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False), + cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3) + .stride({2, 3}).padding(1).output_padding({1, 1}).groups(1).bias(false)''', + input_size=(1, 3, 6, 7), + cudnn=True, + desc='no_bias', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + fullname='ConvTranspose2d_groups', + constructor=lambda: nn.ConvTranspose2d(2, 4, (2, 3), groups=2), + cpp_constructor_args='torch::nn::ConvTranspose2dOptions(2, 4, {2, 3}).groups(2)', + input_size=(1, 2, 4, 5), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.01, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise', + 
constructor=lambda: nn.Conv2d(4, 4, (3, 3), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).groups(4)', + input_size=(2, 4, 6, 6), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise_with_multiplier', + constructor=lambda: nn.Conv2d(4, 8, (3, 3), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 8, {3, 3}).groups(4)', + input_size=(2, 4, 6, 6), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise_strided', + constructor=lambda: nn.Conv2d(4, 4, (3, 3), stride=(2, 2), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).stride({2, 2}).groups(4)', + input_size=(2, 4, 6, 6), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise_padded', + constructor=lambda: nn.Conv2d(4, 4, (3, 3), padding=(1, 1), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).padding({1, 1}).groups(4)', + input_size=(2, 4, 6, 6), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv2d_depthwise_dilated', + constructor=lambda: nn.Conv2d(4, 4, (2, 2), dilation=(2, 2), groups=4), + cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {2, 2}).dilation({2, 2}).groups(4)', + input_size=(2, 4, 5, 5), + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(2, 3, (2, 3, 2)), + cpp_constructor_args='torch::nn::Conv3dOptions(2, 3, {2, 3, 2})', + input_size=(1, 2, 4, 5, 4), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(2, 3, (2, 3, 4), 1, 0, 1, 1, False), + cpp_constructor_args='''torch::nn::Conv3dOptions(2, 3, {2, 3, 4}) + .stride(1).padding(0).dilation(1).groups(1).bias(false)''', + input_size=(1, 2, 3, 4, 5), + cudnn=True, + desc='no_bias', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(2, 3, (1, 1, 1), 1, 0, 1, 1, False), + cpp_constructor_args='''torch::nn::Conv3dOptions(2, 3, {2, 3, 4}) + .stride(1).padding(0).dilation(1).groups(1).bias(false)''', + input_size=(1, 2, 3, 4, 5), + cudnn=True, + desc='1x1x1_no_bias', + check_with_long_tensor=False, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(3, 4, 2, 2), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).stride(2)', + input_size=(2, 3, 5, 5, 5), + cudnn=True, + desc='stride', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(3, 4, 2, 2, 1), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).stride(2).padding(1)', + input_size=(2, 3, 5, 5, 5), + cudnn=True, + desc='stride_padding', + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Conv3d', + constructor_args=(3, 4, (2, 3, 4)), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4})', + input_size=(0, 3, 3, 4, 5), + cudnn=True, + check_with_long_tensor=True, + desc='zero_batch', + with_tf32=True, + ), + dict( + fullname='Conv3d_groups', + constructor=lambda: nn.Conv3d(2, 4, kernel_size=3, groups=2), + 
cpp_constructor_args='torch::nn::Conv3dOptions(2, 4, 3).groups(2)', + input_size=(1, 2, 4, 5, 4), + cudnn=True, + check_with_long_tensor=True, + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_dilated', + constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).dilation(2)', + input_size=(2, 3, 5, 5, 5), + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_dilated_strided', + constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2, stride=2), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).dilation(2).stride(2)', + input_size=(2, 3, 5, 5, 5), + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_pad_valid', + constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="valid"), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kValid)', + input_size=(2, 3, 6, 5, 4), + cudnn=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_pad_same', + constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="same"), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kSame)', + input_size=(2, 3, 6, 5, 4), + cudnn=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + fullname='Conv3d_pad_same_dilated', + constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="same", dilation=2), + cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kSame).dilation(2)', + input_size=(2, 3, 6, 5, 4), + cudnn=True, + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose3d', + constructor_args=(2, 3, (2, 3, 2)), + cpp_constructor_args='torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2})', + cudnn=True, + input_size=(1, 2, 4, 5, 4), + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='ConvTranspose3d', + constructor_args=(2, 3, (2, 3, 2), 1, 0, 0, 1, True, (2, 2, 2)), + cpp_constructor_args='''torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2}) + .stride(1).padding(0).output_padding(0).groups(1).bias(true).dilation({2, 2, 2})''', + cudnn=True, + input_size=(1, 2, 4, 5, 4), + desc='dilated', + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='ReplicationPad3d', + constructor_args=((1, 2, 3, 3, 2, 1),), + cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})', + input_size=(2, 3, 2, 2, 2), + default_dtype=torch.double, + ), + dict( + module_name='ReplicationPad3d', + constructor_args=((1, 2, 3, 3, 2, 1),), + cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})', + input_size=(3, 2, 2, 2), + reference_fn=single_batch_reference_fn, + desc='no_batch_dim', + default_dtype=torch.double, + ), + dict( + module_name='ReplicationPad3d', + constructor_args=((1, 2, 3, 3, 2, 1),), + cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})', + input_fn=lambda: torch.rand(2, 3, 2, 2, 2, dtype=torch.complex128, requires_grad=True), + skip_half=True, + desc='complex' + ), + dict( + module_name='Embedding', + constructor_args=(4, 3), + cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3)', + input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + 
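+ # 'discontiguous' case: expand() turns the (1, 512) index tensor into a
+ # (7, 512) non-contiguous view, exercising Embedding on non-contiguous input.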
module_name='Embedding', + constructor_args=(4, 3), + cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3)', + input_fn=lambda: torch.empty(1, 512, dtype=torch.long).random_(4).expand(7, 512), + check_gradgrad=False, + desc='discontiguous', + default_dtype=torch.double, + ), + dict( + module_name='EmbeddingBag', + constructor_args=(4, 3), + cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3)', + input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), + check_gradgrad=False, + desc='mean', + default_dtype=torch.double, + ), + dict( + module_name='EmbeddingBag', + constructor_args=(4, 3), + cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3)', + input_fn=lambda: torch.empty(1, 512, dtype=torch.long).random_(4).expand(7, 512), + check_gradgrad=False, + desc='discontiguous', + default_dtype=torch.double, + ), + dict( + module_name='EmbeddingBag', + constructor_args=(4, 3, None, 2., False, 'sum'), + cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) + .max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kSum)''', + input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), + check_gradgrad=False, + desc='sum', + default_dtype=torch.double, + ), + dict( + module_name='EmbeddingBag', + constructor_args=(4, 3, None, 2., False, 'max'), + cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) + .max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kMax)''', + input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), + check_gradgrad=False, + desc='max', + default_dtype=torch.double, + ), + dict( + fullname='EmbeddingBag_mean_padding_idx', + constructor=lambda: nn.EmbeddingBag(4, 3, padding_idx=1), + cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3).padding_idx(1)', + input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]), + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + fullname='EmbeddingBag_sum_padding_idx', + constructor=lambda: nn.EmbeddingBag(4, 3, None, 2., False, 'sum', padding_idx=1), + cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) + .max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kSum).padding_idx(1)''', + input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]), + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + fullname='EmbeddingBag_max_padding_idx', + constructor=lambda: nn.EmbeddingBag(4, 3, None, 2., False, 'max', padding_idx=1), + cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) + .max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kMax).padding_idx(1)''', + input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]), + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + fullname='EmbeddingBag_sparse', + constructor=lambda: nn.EmbeddingBag(4, 3, sparse=True, dtype=torch.double), + cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3).sparse(true)._weight(torch::rand({4, 3}).to(torch::kFloat64))', + input_fn=lambda: torch.randperm(2).repeat(1, 2), + check_gradgrad=False, + has_sparse_gradients=True, + ), + dict( + constructor=lambda: nn.Embedding(4, 3, dtype=torch.double, sparse=True), + cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3).sparse(true)._weight(torch::rand({4, 3}).to(torch::kFloat64))', + input_fn=lambda: torch.randperm(2).repeat(1, 2), + fullname='Embedding_sparse', + check_gradgrad=False, + has_sparse_gradients=True, + ), + dict( + module_name='PixelShuffle', + constructor_args=(3,), + 
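+ # PixelShuffle(3) rearranges (N, C*r^2, H, W) -> (N, C, H*r, W*r); with r=3
+ # the (1, 9, 4, 4) input below becomes a (1, 1, 12, 12) output.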
cpp_constructor_args='torch::nn::PixelShuffleOptions(3)', + input_size=(1, 9, 4, 4), + default_dtype=torch.double, + ), + dict( + module_name='PixelUnshuffle', + constructor_args=(3,), + cpp_constructor_args='torch::nn::PixelUnshuffleOptions(3)', + input_size=(1, 1, 12, 12), + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})).scale_factor(c10::nullopt).mode(torch::kNearest)''', + input_size=(1, 2, 4), + fullname='interpolate_nearest_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})).scale_factor(c10::nullopt).mode(torch::kNearest)''', + input_size=(0, 2, 4), + fullname='interpolate_nearest_1d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(12, ), scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})).scale_factor(c10::nullopt).mode(torch::kNearest)''', + input_size=(1, 2, 3), + fullname='interpolate_nearest_tuple_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt).scale_factor(std::vector({4.})).mode(torch::kNearest)''', + input_size=(1, 2, 4), + fullname='interpolate_nearest_scale_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})) + .scale_factor(c10::nullopt) + .mode(torch::kLinear) + .align_corners(false)''', + input_size=(1, 2, 4), + fullname='interpolate_linear_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, ), scale_factor=None, mode='linear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4})) + .scale_factor(c10::nullopt) + .mode(torch::kLinear) + .align_corners(false)''', + input_size=(1, 2, 3), + fullname='interpolate_linear_tuple_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4.})) + .mode(torch::kLinear) + .align_corners(false)''', + input_size=(1, 2, 4), + fullname='interpolate_linear_scale_1d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})) + .scale_factor(c10::nullopt) + .mode(torch::kLinear) + .align_corners(false)''', + input_size=(0, 2, 4), + fullname='interpolate_linear_1d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12})) + .scale_factor(c10::nullopt) + .mode(torch::kLinear) + .align_corners(true)''', + input_size=(1, 2, 4), + 
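+ # With align_corners=True the corner points of the input and output grids are
+ # aligned, so sampling coordinates are scaled by (in_size - 1) / (out_size - 1)
+ # rather than the half-pixel mapping used when align_corners=False.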
fullname='interpolate_linear_1d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4.})) + .mode(torch::kLinear) + .align_corners(true)''', + input_size=(1, 2, 4), + fullname='interpolate_linear_scale_1d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=2, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({2, 2})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 128, 1, 1), + fullname='interpolate_nearest_2d_launch_configs', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_nearest_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(12, 16), scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 16})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 2, 3, 4), + fullname='interpolate_nearest_tuple_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4., 4.})) + .mode(torch::kNearest)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_nearest_scale_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(0, 2, 4, 4), + fullname='interpolate_nearest_2d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(0, 2, 4, 4), + fullname='interpolate_bilinear_2d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, + mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 2, 3), + fullname='interpolate_bilinear_tuple_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., 
+ mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4., 4.})) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_scale_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.), + mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 2.})) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_scale_tuple_shared_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), + mode='bilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 1.})) + .mode(torch::kBilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_scale_tuple_skewed_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bilinear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kBilinear) + .align_corners(true)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_tuple_2d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), + mode='bilinear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 1.})) + .mode(torch::kBilinear) + .align_corners(true)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bilinear_scale_tuple_skewed_2d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(0, 2, 4, 4), + fullname='interpolate_bicubic_2d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, + mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 2, 3), + fullname='interpolate_bicubic_tuple_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4., 4.})) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + 
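+ # scale_factor=4. on a 4x4 spatial input yields a 16x16 output
+ # (output size = floor(input size * scale factor) per spatial dim).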
fullname='interpolate_bicubic_scale_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.), + mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 2.})) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_scale_tuple_shared_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), + mode='bicubic', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 1.})) + .mode(torch::kBicubic) + .align_corners(false)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_scale_tuple_skewed_2d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bicubic', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kBicubic) + .align_corners(true)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_tuple_2d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), + mode='bicubic', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({2., 1.})) + .mode(torch::kBicubic) + .align_corners(true)''', + input_size=(1, 2, 4, 4), + fullname='interpolate_bicubic_scale_tuple_skewed_2d_align_corners', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 2, 4, 4, 4), + fullname='interpolate_nearest_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(0, 2, 4, 4, 4), + fullname='interpolate_nearest_3d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(12, 16, 16), scale_factor=None, mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 16, 16})) + .scale_factor(c10::nullopt) + .mode(torch::kNearest)''', + input_size=(1, 2, 3, 4, 4), + fullname='interpolate_nearest_tuple_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({4., 4., 4.})) + .mode(torch::kNearest)''', + input_size=(1, 2, 4, 4, 4), + fullname='interpolate_nearest_scale_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='trilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kTrilinear) + .align_corners(false)''', + input_size=(1, 2, 4, 4, 
4), + fullname='interpolate_trilinear_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='trilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({12, 12, 12})) + .scale_factor(c10::nullopt) + .mode(torch::kTrilinear) + .align_corners(false)''', + input_size=(0, 2, 4, 4, 4), + fullname='interpolate_trilinear_3d_zero_dim', + pickle=False, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6, 6), + scale_factor=None, mode='trilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kTrilinear) + .align_corners(false)''', + input_size=(1, 2, 2, 3, 3), + fullname='interpolate_trilinear_tuple_3d', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=False), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({3., 3., 3.})) + .mode(torch::kTrilinear) + .align_corners(false)''', + input_size=(1, 2, 3, 4, 5), + fullname='interpolate_trilinear_scale_3d', + # See https://github.com/pytorch/pytorch/issues/5006 + precision=3e-4, + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.interpolate, size=(4, 6, 6), scale_factor=None, + mode='trilinear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(std::vector({4, 6, 6})) + .scale_factor(c10::nullopt) + .mode(torch::kTrilinear) + .align_corners(true)''', + input_size=(1, 2, 2, 3, 3), + fullname='interpolate_trilinear_tuple_3d_align_corners', + pickle=False, + default_dtype=torch.double + ), + dict( + constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=True), + cpp_options_args='''F::InterpolateFuncOptions() + .size(c10::nullopt) + .scale_factor(std::vector({3., 3., 3.})) + .mode(torch::kTrilinear) + .align_corners(true)''', + input_size=(1, 2, 3, 4, 4), + fullname='interpolate_trilinear_scale_3d_align_corners', + # See https://github.com/pytorch/pytorch/issues/5006 + precision=3e-4, + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=-1), + cpp_options_args='F::SoftmaxFuncOptions(-1)', + input_size=(2, 128), # trigger the last-dim algo in CUDA + fullname='softmax_lastdim', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64), + cpp_options_args='F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)', + input_size=(2, 128), + fullname='softmax_lastdim_dtype', + pickle=False, + test_cuda=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=1), + cpp_options_args='F::SoftmaxFuncOptions(1)', + input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo + fullname='softmax_spatial_special', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=1), + cpp_options_args='F::SoftmaxFuncOptions(1)', + input_size=(2, 2, 4, 4), # regular spatial algorithm + fullname='softmax_spatial', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64), + cpp_options_args='F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)', + input_size=(2, 2, 4, 4), # regular spatial 
algorithm + fullname='softmax_spatial_dtype', + pickle=False, + test_cuda=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=0), + cpp_options_args='F::SoftmaxFuncOptions(0)', + input_size=(2, 3, 4, 5), + fullname='softmax_functional_dim0', + test_cuda=False, + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=3), + cpp_options_args='F::SoftmaxFuncOptions(3)', + input_size=(2, 3, 4, 5), + fullname='softmax_functional_dim3', + test_cuda=False, + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.softmax, dim=-1), + cpp_options_args='F::SoftmaxFuncOptions(-1)', + input_size=(), + fullname='softmax_functional_scalar', + test_cuda=False, + pickle=False, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=-1), + cpp_options_args='F::LogSoftmaxFuncOptions(-1)', + input_size=(2, 128), # trigger the last-dim algo in CUDA + fullname='log_softmax_lastdim', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=1), + cpp_options_args='F::LogSoftmaxFuncOptions(1)', + input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo + fullname='log_softmax_spatial_special', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=1), + cpp_options_args='F::LogSoftmaxFuncOptions(1)', + input_size=(2, 2, 4, 4), # regular spatial algorithm + fullname='log_softmax_spatial', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=0), + cpp_options_args='F::LogSoftmaxFuncOptions(0)', + input_size=(2, 3, 4, 5), + fullname='log_softmax_dim0', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=3), + cpp_options_args='F::LogSoftmaxFuncOptions(3)', + input_size=(2, 3, 4, 5), + fullname='log_softmax_dim3', + pickle=False, + default_dtype=torch.double, + ), + dict( + constructor=wrap_functional(F.log_softmax, dim=0), + cpp_options_args='F::LogSoftmaxFuncOptions(0)', + input_size=(), + fullname='log_softmax_scalar', + pickle=False, + ), + dict( + fullname='Unfold', + constructor=lambda: nn.Unfold((2, 2), (1, 1), (0, 0), (1, 1)), + cpp_constructor_args='torch::nn::UnfoldOptions({2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})', + input_size=(2, 4, 3, 3), + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Fold', + constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)), + cpp_constructor_args='torch::nn::FoldOptions({3, 3}, {2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})', + input_size=(2, 16, 4), + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Fold_no_batch_dim_input', + constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)), + cpp_constructor_args='torch::nn::FoldOptions({3, 3}, {2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})', + input_size=(16, 4), + check_gradgrad=False, + ref=single_batch_reference_fn, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Unfold_int_input', + constructor=lambda: nn.Unfold(2, 1, 0, 1), + cpp_constructor_args='torch::nn::UnfoldOptions(2).dilation(1).padding(0).stride(1)', + input_size=(2, 4, 3, 3), + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Fold_int_input', + constructor=lambda: nn.Fold(3, 2, 1, 0, 1), + 
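+ # Scalar arguments are broadcast to both spatial dims, so this is equivalent
+ # to the Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)) variant above.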
cpp_constructor_args='torch::nn::FoldOptions(3, 2).dilation(1).padding(0).stride(1)', + input_size=(2, 16, 4), + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + fullname='Fold_no_batch_dim_int_input', + constructor=lambda: nn.Fold(3, 2, 1, 0, 1), + cpp_constructor_args='torch::nn::FoldOptions(3, 2).dilation(1).padding(0).stride(1)', + input_size=(16, 4), + ref=single_batch_reference_fn, + check_gradgrad=False, + test_cuda=True, + default_dtype=torch.double, + ), + dict( + module_name='RReLU', + constructor_args=(0.1, 0.9), + cpp_constructor_args='torch::nn::RReLUOptions().lower(0.1).upper(0.9)', + input_size=(), + desc='with_up_down_scalar', + test_cuda=False, + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + input_fn=lambda: (torch.randn(10, 8), torch.randn(10, 8)), + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + input_fn=lambda: (torch.randn(10, 1), torch.randn(10, 8)), + desc='broadcast_lhs', + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + input_fn=lambda: (torch.randn(10, 8), torch.randn(1, 8)), + desc='broadcast_rhs', + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + constructor_args=(1.5, 1e-05, True), + cpp_constructor_args='torch::nn::PairwiseDistanceOptions().p(1.5).eps(1e-05).keepdim(true)', + input_fn=lambda: (torch.randn(10, 8), torch.randn(10, 8)), + desc='with_non_default_args', + default_dtype=torch.double, + ), + dict( + module_name='PairwiseDistance', + input_fn=lambda: (torch.randn(8), torch.randn(8)), + reference_fn=single_batch_reference_fn, + desc='no_batch_dim', + default_dtype=torch.double, + ), + dict( + module_name='TransformerEncoderLayer', + constructor_args=(4, 2, 16, 0.0), + cpp_constructor_args='''torch::nn::TransformerEncoderLayerOptions(4, 2) + .dim_feedforward(16) + .dropout(0.0)''', + input_size=(2, 3, 4), + desc='relu_activation', + with_tf32=True, + tf32_precision=0.1, + # TODO(#50743): figure out the error + # RuntimeError: The size of tensor a (6) must match the size of tensor b (4) + # at non-singleton dimension 2 + check_batched_grad=False, + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + module_name='TransformerEncoderLayer', + constructor_args=(4, 2, 8, 0.0, F.gelu), + cpp_constructor_args='''torch::nn::TransformerEncoderLayerOptions(4, 2) + .dim_feedforward(8) + .dropout(0.0) + .activation(torch::kGELU)''', + input_size=(2, 3, 4), + check_gradgrad=False, + desc='gelu_activation', + with_tf32=True, + tf32_precision=0.08 if SM90OrLater else 0.05, + default_dtype=torch.double, + ), + dict( + module_name='TransformerDecoderLayer', + constructor_args=(4, 2, 8, 0.0), + cpp_constructor_args='''torch::nn::TransformerDecoderLayerOptions(4, 2) + .dim_feedforward(8) + .dropout(0.0)''', + input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4)), + check_gradgrad=False, + desc='relu_activation', + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='TransformerDecoderLayer', + constructor_args=(4, 2, 8, 0.0, F.gelu), + cpp_constructor_args='''torch::nn::TransformerDecoderLayerOptions(4, 2) + .dim_feedforward(8) + .dropout(0.0) + .activation(torch::kGELU)''', + input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4)), + check_gradgrad=False, + desc='gelu_activation', + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + dict( + module_name='Transformer', + constructor_args=(4, 2, 2, 2, 8, 0.0, F.relu), + 
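+ # Positional args map to (d_model=4, nhead=2, num_encoder_layers=2,
+ # num_decoder_layers=2, dim_feedforward=8, dropout=0.0, activation=F.relu),
+ # mirroring the C++ TransformerOptions below.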
cpp_constructor_args='''torch::nn::TransformerOptions() + .d_model(4) + .nhead(2) + .num_encoder_layers(2) + .num_decoder_layers(2) + .dim_feedforward(8) + .dropout(0.0) + .activation(torch::kReLU)''', + input_fn=lambda:(torch.rand(3, 3, 4), torch.rand(2, 3, 4), torch.rand(3, 3)), + check_gradgrad=False, + desc='multilayer_coder', + with_tf32=True, + tf32_precision=0.05 if SM90OrLater else 0.03, + default_dtype=torch.double, + ), + dict( + module_name='Linear', + constructor_args=(3, 5), + cpp_constructor_args='torch::nn::LinearOptions(3, 5)', + input_fn=lambda: torch.rand(3), + reference_fn=lambda i, p, _: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1], + desc="no_batch_dim", + with_tf32=True, + tf32_precision=0.005, + default_dtype=torch.double, + ), + dict( + module_name='Flatten', + cpp_constructor_args='torch::nn::FlattenOptions().start_dim(-3).end_dim(-1)', + constructor_args=(-3, -1), + input_size=(3, 4, 5), + reference_fn=single_batch_reference_fn, + desc="no_batch_dim", + default_dtype=torch.double, + ), + dict( + module_name='Unflatten', + cpp_constructor_args='torch::nn::UnflattenOptions(-2, {2, 2})', + constructor_args=(-2, torch.Size([2, 2])), + input_size=(3, 4, 5), + reference_fn=single_batch_reference_fn, + desc="no_batch_dim", + default_dtype=torch.double, + ), + dict( + module_name='LayerNorm', + constructor_args=([56, 56, 56], 1e-5, False), + cpp_constructor_args='torch::nn::LayerNormOptions({56, 56, 56}).eps(1e-5).elementwise_affine(false)', + input_size=(4, 56, 56, 56), + cudnn=True, + check_eval=True, + gradcheck_fast_mode=True, + check_half=True, + desc='3d_no_affine_large_feature', + ), +] + +# add conv padding mode tests: +for padding_mode, cpp_padding_mode in zip( + ['reflect', 'circular', 'replicate', 'zeros'], + ['torch::kReflect', 'torch::kCircular', 'torch::kReplicate', 'torch::kZeros']): + # conv signature: + # in_channels, out_channels, kernel_size, stride=1, + # padding=0, dilation=1, groups=1, + # bias=True, padding_mode='zeros' + for d in (1, 2, 3): + if d == 3 and padding_mode == 'reflect': + # FIXME: remove after implementing reflection pad 3d + # https://github.com/pytorch/pytorch/issues/27655 + continue + padding = tuple(range(1, d + 1)) + cpp_padding = '{' + ', '.join(map(str, padding)) + '}' + input_size = (2, 2) + (4,) * d + output_size = (2, 3) + tuple(p + 1 for p in padding) # simplified from `(4 + 2 * p - 3) // 2 + 1` + new_module_tests.append( + dict( + module_name=f'Conv{d}d', + constructor_args=(2, 3, 3, 2, padding, 1, 1, True, padding_mode), + cpp_constructor_args=f'''torch::nn::Conv{d}dOptions(2, 3, 3) + .stride(2) + .padding({cpp_padding}) + .dilation(1) + .groups(1) + .bias(true) + .padding_mode({cpp_padding_mode})''', + input_size=input_size, + output_size=output_size, + cudnn=True, + desc=f'{padding_mode}_stride2_pad2', + with_tf32=True, + tf32_precision=0.05, + default_dtype=torch.double, + ), + ) + +# Check that non linear activations work with no batch dimensions +non_linear_activations_no_batch = [ + 'ELU', 'Hardshrink', 'Hardsigmoid', 'Hardtanh', 'Hardswish', 'LeakyReLU', + 'LogSigmoid', 'PReLU', 'ReLU', 'ReLU6', 'RReLU', 'SELU', 'CELU', 'GELU', 'GLU', + 'Sigmoid', 'SiLU', 'Mish', 'Softplus', 'Softshrink', 'Softsign', 'Tanh', + 'Tanhshrink', 'Threshold' +] +non_linear_activations_extra_info: Dict[str, dict] = { + 'CELU': {'constructor_args': (2.,), 'default_dtype': torch.double}, + 'Threshold': {'constructor_args': (2., 1.)}, + 'Hardsigmoid': {'check_gradgrad': False, 'check_jit': False, 'default_dtype': torch.double}, + 
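+ # Each entry in this dict is merged (via dict.update) into the generated
+ # no_batch_dim test for the corresponding activation in the loop below.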
'Hardswish': {'check_gradgrad': False, 'check_jit': False, 'default_dtype': torch.double}, + # For RRelu, test that compare CPU and GPU results fail because RNG + # is different between CPU and GPU + 'RReLU': {'test_cuda': False, 'default_dtype': torch.double}, + 'ELU': {'default_dtype': torch.double}, + 'GELU': {'default_dtype': torch.double}, + 'GLU': {'default_dtype': torch.double}, + 'Hardshrink': {'default_dtype': torch.double}, + 'Hardtanh': {'default_dtype': torch.double}, + 'LeakyReLU': {'default_dtype': torch.double}, + 'LogSigmoid': {'default_dtype': torch.double}, + 'Mish': {'default_dtype': torch.double}, + 'PReLU': {'default_dtype': torch.double}, + 'ReLU6': {'default_dtype': torch.double}, + 'ReLU': {'default_dtype': torch.double}, + 'SELU': {'default_dtype': torch.double}, + 'SiLU': {'default_dtype': torch.double}, + 'Sigmoid': {'default_dtype': torch.double}, + 'Softplus': {'default_dtype': torch.double}, + 'Softshrink': {'default_dtype': torch.double}, + 'Softsign': {'default_dtype': torch.double}, + 'Tanh': {'default_dtype': torch.double}, + 'Tanhshrink': {'default_dtype': torch.double}, +} +for non_linear_activation in non_linear_activations_no_batch: + activation_test_info = dict( + module_name=non_linear_activation, + input_size=(4,), + reference_fn=single_batch_reference_fn, + desc='no_batch_dim', + test_cpp_api_parity=False, + ) + extra_info = non_linear_activations_extra_info.get(non_linear_activation, {}) + activation_test_info.update(extra_info) + new_module_tests.append(activation_test_info) + + +def kldivloss_reference(input, target, reduction='mean'): + result = target * (target.log() - input) + if reduction == 'mean': + return result.mean() + elif reduction == 'sum': + return result.sum() + elif reduction == 'batchmean' and result.dim() != 0: + return result.sum() / result.size(0) + return result + + +def kldivloss_log_target_reference(input, target, reduction='mean'): + result = torch.exp(target) * (target - input) + if reduction == 'mean': + return result.mean() + elif reduction == 'sum': + return result.sum() + elif reduction == 'batchmean' and result.dim() != 0: + return result.sum() / result.size(0) + return result + + +def nlllossNd_reference(input, target, weight=None, ignore_index=-100, + reduction='mean'): + assert input.dim() >= 3 + N = input.size(0) + C = input.size(1) + out_size = (N,) + input.size()[2:] + output = torch.zeros(out_size).type_as(input) + + if weight is None: + weight = torch.ones(C).type_as(input) + total_weight = 0 + for tup in product(*[range(size) for size in out_size]): + t_nx = target[tup] + norm = 0. 
if ignore_index == t_nx else weight[t_nx].item() + input_index = list(tup) + input_index.insert(1, t_nx) + output[tup] = -input[tuple(input_index)] * norm + total_weight += norm + + if reduction == 'mean': + return output.sum() / total_weight + elif reduction == 'sum': + return output.sum() + return output + + +def cross_entropy_loss_prob_target_reference(input, target, weight=None, reduction='mean', + label_smoothing=0.0): + assert input.dim() >= 2 + + input = torch.log_softmax(input, 1) + C = input.size(1) + if weight is None: + weight = torch.ones(C).type_as(input) + weight = weight.view(1, C, *(1 for _ in input.shape[2:])) + + if label_smoothing > 0.0: + assert label_smoothing <= 1.0 + target = (target * (1 - label_smoothing) + label_smoothing / C) + + output = -(input * target * weight).sum(dim=1) + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def cross_entropy_loss_indices_target_reference(input, target, weight=None, ignore_index=-100, + reduction='mean', label_smoothing=0.0): + log_softmax_input = torch.log_softmax(input, 1) + nllloss = F.nll_loss( + log_softmax_input, + target, + weight, + ignore_index=ignore_index, + reduction=reduction) + + if label_smoothing == 0.0: + return nllloss + + assert 0.0 < label_smoothing <= 1.0 + + input = torch.log_softmax(input, 1) + C = input.size(1) + if weight is not None: + input = input * weight.view(1, C, *(1 for _ in input.shape[2:])) + + smooth_loss = -torch.sum(input, 1) + + ignore_mask = target == ignore_index + smooth_loss.masked_fill_(ignore_mask, 0.0) + + if reduction == 'mean': + if weight is not None: + # TODO: This code can path can be removed if #61309 is resolved + # loss is normalized by the weights to be consistent with nll_loss_nd + ret = torch.sum(smooth_loss) / weight.gather(0, target.masked_select(ignore_mask.logical_not()).flatten()).sum() + else: + ret = torch.mean(smooth_loss.masked_select(ignore_mask.logical_not())) + elif reduction == 'sum': + ret = torch.sum(smooth_loss) + else: + ret = smooth_loss + + return (1 - label_smoothing) * nllloss + ret * (label_smoothing / C) + + +def cross_entropy_loss_reference(input, target, weight=None, ignore_index=-100, reduction='mean', + label_smoothing=0.0): + if input.shape == target.shape: + return cross_entropy_loss_prob_target_reference( + input, + target, + weight=weight, + reduction=reduction, + label_smoothing=label_smoothing) + else: + return cross_entropy_loss_indices_target_reference( + input, target, weight=weight, reduction=reduction, + ignore_index=ignore_index, label_smoothing=label_smoothing + ) + + +def nllloss_reference(input, target, weight=None, ignore_index=-100, + reduction='mean'): + + def nll_loss_helper(input, target, weight, ignore_index): + if target == ignore_index: + return (0, 0) + norm = 1 if weight is None else weight[target] + result = -input[target] * norm + return (result, norm) + + losses_and_weights = [nll_loss_helper(i, t, weight, ignore_index) + for i, t in zip(input, target)] + losses, weights = zip(*losses_and_weights) + losses_tensor = input.new_tensor(losses) + if reduction == 'mean': + return sum(losses_tensor) / sum(weights) + elif reduction == 'sum': + return sum(losses_tensor) + else: + return losses_tensor + + +def smoothl1loss_reference(input, target, reduction='mean', beta=1.0): + abs_diff = (input - target).abs() + ge_beta_mask = (abs_diff >= beta).type_as(abs_diff) + lt_beta_mask = (abs_diff < beta).type_as(abs_diff) + # when beta <= 0 we should just use l1_loss 
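+ # per-element loss: 0.5 * d**2 / beta if |d| < beta else |d| - 0.5 * beta,
+ # where d = input - target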
+ if beta == 0: + output = abs_diff + else: + output = ge_beta_mask * (abs_diff - 0.5 * beta) + lt_beta_mask * 0.5 * (abs_diff ** 2) / beta + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def huberloss_reference(input, target, reduction='mean', delta=1.0): + abs_diff = (input - target).abs() + ge_delta_mask = (abs_diff >= delta) + lt_delta_mask = (abs_diff < delta) + output = ge_delta_mask * delta * (abs_diff - 0.5 * delta) + lt_delta_mask * 0.5 * (abs_diff ** 2) + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def _multilabelmarginloss_reference(input, target): + targets = [] + for target_index in target: + if target_index < 0: + break + targets.append(target_index) + + sum = 0 + for target_index in targets: + for i in range(0, len(input)): + if i not in targets: + sum += max(0, 1 - input[target_index] + input[i]) + + return sum + + +def multilabelmarginloss_reference(input, target, reduction='mean'): + # make everything 2-dimensional + input_dim = input.dim() + if input.dim() < 2: + assert target.dim() < 2 + input = input.unsqueeze(0) if input.dim() == 1 else input.unsqueeze(0).unsqueeze(0) + target = target.unsqueeze(0) if target.dim() == 1 else target.unsqueeze(0).unsqueeze(0) + + n = input.size(0) + dim = input.size(1) + output = input.new(n).zero_() + for i in range(0, n): + output[i] = _multilabelmarginloss_reference(input[i], target[i]) + + if reduction == 'mean': + return output.mean() / dim + elif reduction == 'sum': + return output.sum() / dim + elif input_dim < 2: + # we know we have (1, C) X (1, C) -> (1,), so squeeze will get us + # back to correct dimensionality + return output.squeeze() / dim + else: + return output / dim + + +def hingeembeddingloss_reference(input, target, margin=1.0, reduction='mean'): + margin_clamp = (margin - input).clamp(min=0).type_as(input) + output = torch.where(target == 1, input, margin_clamp) + + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def softmarginloss_reference(input, target, reduction='mean'): + output = (1 + (-input * target).exp()).log() + + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def _multimarginloss_reference(input, target_idx, p, margin, weight): + if weight is None: + weight = input.new(len(input)).fill_(1) + + output = 0 + for i in range(0, len(input)): + if i != target_idx: + output += max(0, weight[target_idx] * (margin - input[target_idx] + input[i]) ** p) + return output + + +def multimarginloss_reference(input, target, p=1, margin=1, weight=None, reduction='mean'): + if input.dim() < 2: + input = input.unsqueeze(0) if input.dim() == 1 else input.unsqueeze(0).unsqueeze(0) + + target_dim = target.dim() + if target.dim() == 0: + target = target.unsqueeze(0) + + n = input.size(0) + dim = input.size(1) + output = input.new(n) + for x in range(0, n): + output[x] = _multimarginloss_reference(input[x], target[x], p, margin, weight) + + if reduction == 'mean': + return output.mean() / dim + elif reduction == 'sum': + return output.sum() / dim + elif target_dim == 0: + return output.squeeze(0) / dim + return output / dim + + +def cosineembeddingloss_reference(input1, input2, target, margin=0, reduction='mean'): + def _cos(a, b): + cos = a.new(a.size(0)) + for i in range(0, a.size(0)): + cos[i] = (a[i] * b[i]).sum() / ((((a[i] * a[i]).sum() + 1e-12) * 
((b[i] * b[i]).sum() + 1e-12)) ** 0.5) + return cos + + output = torch.where(target == 1, 1 - _cos(input1, input2), (_cos(input1, input2) - margin).clamp(min=0)) + + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def tripletmarginloss_reference(anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False, + reduction='mean'): + d_p = torch.pairwise_distance(anchor, positive, p, eps) + d_n = torch.pairwise_distance(anchor, negative, p, eps) + if swap: + d_s = torch.pairwise_distance(positive, negative, p, eps) + d_n = torch.min(d_n, d_s) + + output = torch.clamp(margin + d_p - d_n, min=0.0) + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +def marginrankingloss_reference(input1, input2, target, margin=0, reduction='mean'): + output = (-target * (input1 - input2) + margin).clamp(min=0) + if reduction == 'mean': + return output.mean() + elif reduction == 'sum': + return output.sum() + return output + + +# this directly follows Graves et al's paper, in contrast to the production implementation, it does not use log-space +def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean'): + input_lengths = torch.as_tensor(input_lengths, dtype=torch.long) + target_lengths = torch.as_tensor(target_lengths, dtype=torch.long) + dt = log_probs.dtype + log_probs = log_probs.double() # we need the accuracy as we are not in logspace + targets = targets.long() + cum_target_lengths = target_lengths.cumsum(0) + losses = [] + for i in range(log_probs.size(1)): + input_length = input_lengths[i].item() + target_length = target_lengths[i].item() + cum_target_length = cum_target_lengths[i].item() + targets_prime = targets.new_full((2 * target_length + 1,), blank) + if targets.dim() == 2: + targets_prime[1::2] = targets[i, :target_length] + else: + targets_prime[1::2] = targets[cum_target_length - target_length:cum_target_length] + probs = log_probs[:input_length, i].exp() + alpha = log_probs.new_zeros((target_length * 2 + 1,)) + alpha[0] = probs[0, blank] + alpha[1] = probs[0, targets_prime[1]] + mask_third = (targets_prime[:-2] != targets_prime[2:]) + for t in range(1, input_length): + alpha_next = alpha.clone() + alpha_next[1:] += alpha[:-1] + alpha_next[2:] += torch.where(mask_third, alpha[:-2], alpha.new_zeros(1)) + alpha = probs[t, targets_prime] * alpha_next + losses.append(-alpha[-2:].sum().log()[None]) + output = torch.cat(losses, 0) + if reduction == 'mean': + return (output / target_lengths.to(dtype=output.dtype, device=output.device)).mean() + elif reduction == 'sum': + return output.sum() + output = output.to(dt) + return output + + +loss_reference_fns: Dict['str', Callable] = { + 'KLDivLoss': kldivloss_reference, + 'KLDivLoss_log_target': kldivloss_log_target_reference, + 'NLLLoss': nllloss_reference, + 'NLLLossNd': nlllossNd_reference, + 'SmoothL1Loss': smoothl1loss_reference, + 'HuberLoss': huberloss_reference, + 'MultiLabelMarginLoss': multilabelmarginloss_reference, + 'HingeEmbeddingLoss': hingeembeddingloss_reference, + 'SoftMarginLoss': softmarginloss_reference, + 'MultiMarginLoss': multimarginloss_reference, + 'CosineEmbeddingLoss': cosineembeddingloss_reference, + 'TripletMarginLoss': tripletmarginloss_reference, + 'MarginRankingLoss': marginrankingloss_reference, + 'CTCLoss': ctcloss_reference, + 'CrossEntropyLoss': cross_entropy_loss_reference +} + + +criterion_tests = [ + dict( + module_name='L1Loss', + 
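+ # L1Loss with the default 'mean' reduction is the mean absolute error; the
+ # reference below sums |a - b| over the batch and divides by i.numel().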
input_size=(2, 3, 4), + target_fn=lambda: torch.randn((2, 3, 4), requires_grad=True), + reference_fn=lambda i, t, _: 1. / i.numel() * + sum((a - b).abs().sum() for a, b in zip(i, t)), + check_complex=True, + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + input_fn=lambda: torch.rand(15, 10).log(), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + reference_fn=lambda i, t, m: + nllloss_reference(i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + constructor_args=(None, None, 2), + cpp_constructor_args='torch::nn::NLLLossOptions().weight({}).ignore_index(2)', + input_fn=lambda: torch.rand(15, 10).log(), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + reference_fn=lambda i, t, _: nllloss_reference(i, t, ignore_index=2), + desc='ignore_index', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + constructor_args_fn=lambda: (torch.rand(10),), + cpp_constructor_args='torch::nn::NLLLossOptions().weight(torch::rand(10))', + input_fn=lambda: torch.rand(15, 10).add(1e-2).log(), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + reference_fn=lambda i, t, m: + nllloss_reference(i, t, weight=get_weight(m)), + desc='weights', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + constructor_args_fn=lambda: (torch.rand(10), None, 2), + cpp_constructor_args='torch::nn::NLLLossOptions().weight(torch::rand(10)).ignore_index(2)', + input_fn=lambda: torch.rand(15, 10).add(1e-2).log(), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + reference_fn=lambda i, t, m: + nllloss_reference(i, t, weight=get_weight(m), ignore_index=2), + desc='weights_ignore_index', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + constructor_args_fn=lambda: (torch.rand(10), None, -1), + cpp_constructor_args='torch::nn::NLLLossOptions().weight(torch::rand(10)).ignore_index(-1)', + input_fn=lambda: torch.rand(15, 10).add(1e-2).log(), + target_fn=lambda: torch.empty(15).uniform_().mul(10 + 1).floor().long() - 1, + reference_fn=lambda i, t, m: + nllloss_reference(i, t, weight=get_weight(m), ignore_index=-1), + desc='weights_ignore_index_neg', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='KLDivLoss', + input_fn=lambda: torch.rand(10, 10).log(), + target_fn=lambda: torch.rand(10, 10), + reference_fn=lambda i, t, m: + kldivloss_reference(i, t, get_reduction(m)), + check_sum_reduction=True, + default_dtype=torch.double, + ), + dict( + module_name='KLDivLoss', + constructor=wraps(nn.KLDivLoss)(partial(nn.KLDivLoss, log_target=True)), + cpp_constructor_args='torch::nn::KLDivLossOptions().log_target(true)', + input_fn=lambda: torch.rand(10, 10).log(), + target_fn=lambda: torch.rand(10, 10).log(), + reference_fn=lambda i, t, m: + kldivloss_log_target_reference(i, t, get_reduction(m)), + check_sum_reduction=True, + desc='log_target', + default_dtype=torch.double, + ), + dict( + module_name='MSELoss', + input_fn=lambda: torch.rand((2, 3, 4, 5), dtype=torch.double), + target_fn=lambda: torch.randn((2, 3, 4, 5), dtype=torch.double, requires_grad=True), + reference_fn=lambda i, t, m: ((i - t).abs().pow(2).sum() / (i.numel() + if get_reduction(m) == 'mean' else 1)), + check_sum_reduction=True, + default_dtype=torch.double, + ), + dict( + module_name='BCELoss', + input_fn=lambda: 
torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2), + target_fn=lambda: torch.randn(15, 10).gt(0).to(torch.get_default_dtype()), + reference_fn=lambda i, t, m: -(t * i.log() + (1 - t) * (1 - i).log()).sum() / + (i.numel() if get_reduction(m) else 1), + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='BCELoss', + constructor_args_fn=lambda: (torch.rand(10),), + cpp_constructor_args='torch::nn::BCELossOptions().weight(torch::rand(10))', + input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2), + target_fn=lambda: torch.randn(15, 10).gt(0).to(torch.get_default_dtype()), + reference_fn=lambda i, t, m: -((t * i.log() + (1 - t) * (1 - i).log()) * get_weight(m)).sum() / + (i.numel() if get_reduction(m) else 1), + desc='weights', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + input_size=(15, 10), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + constructor_args_fn=lambda: (torch.rand(10),), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(10))', + input_size=(15, 10), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + desc='weights', + default_dtype=torch.double, + ), + dict( + module_name='HingeEmbeddingLoss', + input_size=(10,), + target_fn=lambda: torch.randn(10).gt(0).to(torch.get_default_dtype()).mul_(2).sub(1), + reference_fn=lambda i, t, m: + hingeembeddingloss_reference(i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + default_dtype=torch.double, + ), + dict( + module_name='HingeEmbeddingLoss', + constructor_args=(0.5,), + cpp_constructor_args='torch::nn::HingeEmbeddingLossOptions().margin(0.5)', + input_size=(10,), + target_fn=lambda: torch.randn(10).gt(0).to(torch.get_default_dtype()).mul_(2).sub(1), + reference_fn=lambda i, t, m: + hingeembeddingloss_reference(i, t, margin=0.5, reduction=get_reduction(m)), + desc='margin', + check_sum_reduction=True, + default_dtype=torch.double, + ), + dict( + module_name='MultiLabelMarginLoss', + input_size=(10,), + target_fn=lambda: torch.rand(10).mul(10).floor().long(), + reference_fn=lambda i, t, m: + multilabelmarginloss_reference(i, t, reduction=get_reduction(m)), + desc="1d", + check_sum_reduction=True, + check_gradgrad=False, + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='MultiLabelMarginLoss', + input_size=(5, 10), + target_fn=lambda: torch.rand(5, 10).mul(10).floor().long(), + reference_fn=lambda i, t, m: + multilabelmarginloss_reference(i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + check_gradgrad=False, + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='MultiLabelSoftMarginLoss', + input_size=(5, 10), + target_fn=lambda: torch.rand(5, 10).mul(2).floor(), + reference_fn=lambda i, t, m: -(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()).sum() / i.numel(), + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + module_name='MultiMarginLoss', + input_size=(5, 10), + target_fn=lambda: torch.rand(5).mul(8).floor().long(), + reference_fn=lambda i, t, m: + multimarginloss_reference(i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + module_name='MultiMarginLoss', + input_size=(10,), + target_fn=lambda: torch.rand(1).mul(8).floor().long(), + reference_fn=lambda i, t, m: + multimarginloss_reference(i, t, 
reduction=get_reduction(m)), + desc='1d', + check_sum_reduction=True, + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + module_name='MultiMarginLoss', + constructor_args=(2,), + cpp_constructor_args='torch::nn::MultiMarginLossOptions().p(2)', + input_fn=lambda: torch.rand(5, 10).clamp_(1e-2, 1 - 1e-2), + target_fn=lambda: torch.rand(5).mul(8).floor().long(), + reference_fn=lambda i, t, m: + multimarginloss_reference(i, t, p=2, reduction=get_reduction(m)), + desc='p', + check_sum_reduction=True, + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + module_name='MultiMarginLoss', + constructor_args=(1, 0.5), + cpp_constructor_args='torch::nn::MultiMarginLossOptions().p(1).margin(0.5)', + legacy_constructor_args=(1, None, 0.5), + input_size=(5, 10), + target_fn=lambda: torch.rand(5).mul(8).floor().long(), + reference_fn=lambda i, t, m: + multimarginloss_reference(i, t, margin=0.5, reduction=get_reduction(m)), + desc='margin', + check_sum_reduction=True, + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + module_name='MultiMarginLoss', + constructor_args=(1, 1., torch.rand(10, dtype=torch.double)), + cpp_constructor_args='torch::nn::MultiMarginLossOptions().p(1).margin(1.).weight(torch::rand(10).to(torch::kFloat64))', + legacy_constructor_args=(1, torch.rand(10, dtype=torch.double)), + input_fn=lambda: torch.rand(5, 10, dtype=torch.double), + target_fn=lambda: torch.rand(5).mul(8).floor().long(), + reference_fn=lambda i, t, m: + multimarginloss_reference(i, t, weight=get_weight(m), reduction=get_reduction(m)), + desc='weights', + check_sum_reduction=True, + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + module_name='SmoothL1Loss', + input_size=(5, 10), + target_fn=lambda: torch.randn((5, 10), requires_grad=True), + check_sum_reduction=True, + reference_fn=lambda i, t, m, b=1.0: + smoothl1loss_reference(i, t, reduction=get_reduction(m), beta=b), + default_dtype=torch.double, + ), + dict( + module_name='HuberLoss', + input_size=(5, 10), + target_fn=lambda: torch.randn((5, 10), requires_grad=True), + check_sum_reduction=True, + check_half=True, + check_bfloat16=True, + reference_fn=lambda i, t, m: + huberloss_reference(i, t, reduction=get_reduction(m)), + default_dtype=torch.double, + ), + dict( + module_name='SoftMarginLoss', + input_size=(5, 5), + target_fn=lambda: torch.randn(5, 5).sign(), + reference_fn=lambda i, t, m: + softmarginloss_reference(i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + default_dtype=torch.double, + ), + dict( + module_name='CosineEmbeddingLoss', + input_fn=lambda: (torch.rand(15, 10, dtype=torch.double), torch.rand(15, 10, dtype=torch.double)), + target_fn=lambda: torch.randn(15, dtype=torch.double).sign(), + reference_fn=lambda i, t, m: + cosineembeddingloss_reference(i[0], i[1], t, reduction=get_reduction(m)), + check_sum_reduction=True, + ), + dict( + module_name='CosineEmbeddingLoss', + constructor_args=(0.7,), + cpp_constructor_args='torch::nn::CosineEmbeddingLossOptions().margin(0.7)', + input_fn=lambda: (torch.rand(15, 10, dtype=torch.double), torch.rand(15, 10, dtype=torch.double)), + target_fn=lambda: torch.randn(15, dtype=torch.double).sign(), + reference_fn=lambda i, t, m: + cosineembeddingloss_reference(i[0], i[1], t, margin=0.7, reduction=get_reduction(m)), + desc='margin', + check_sum_reduction=True, + ), + dict( + module_name='MarginRankingLoss', + input_fn=lambda: (torch.randn(50).mul(10), torch.randn(50).mul(10)), + target_fn=lambda: torch.randn(50).sign(), + 
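+ # margin ranking loss: max(0, -y * (x1 - x2) + margin) with y in {-1, +1};
+ # margin defaults to 0 here (see marginrankingloss_reference above).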
reference_fn=lambda i, t, m: + marginrankingloss_reference(i[0], i[1], t, reduction=get_reduction(m)), + check_sum_reduction=True, + default_dtype=torch.double, + ), + dict( + module_name='MarginRankingLoss', + constructor_args=(0.5,), + cpp_constructor_args='torch::nn::MarginRankingLossOptions().margin(0.5)', + input_fn=lambda: (torch.randn(50).mul(10), torch.randn(50).mul(10)), + target_fn=lambda: torch.randn(50).sign(), + reference_fn=lambda i, t, m: + marginrankingloss_reference(i[0], i[1], t, margin=0.5, reduction=get_reduction(m)), + desc='margin', + check_sum_reduction=True, + default_dtype=torch.double, + ), + dict( + module_name='BCEWithLogitsLoss', + input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2), + target_fn=lambda: torch.randn(15, 10).gt(0).to(torch.get_default_dtype()), + default_dtype=torch.double, + ), + dict( + module_name='BCEWithLogitsLoss', + constructor_args=(torch.rand(10, dtype=torch.double),), + cpp_constructor_args='torch::nn::BCEWithLogitsLossOptions().weight(torch::rand(10).to(torch::kFloat64))', + input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2), + target_fn=lambda: torch.randn(15, 10).gt(0).to(torch.get_default_dtype()), + desc='weights', + default_dtype=torch.double, + ), + dict( + module_name='BCEWithLogitsLoss', + constructor_args=(torch.rand((), dtype=torch.double),), + cpp_constructor_args='torch::nn::BCEWithLogitsLossOptions().weight(torch::rand({}).to(torch::kFloat64))', + input_fn=lambda: torch.rand(()).clamp_(1e-2, 1 - 1e-2), + target_fn=lambda: torch.randn(()).gt(0).to(torch.get_default_dtype()), + desc='scalar_weights', + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + input_size=(2, 3, 5, 5), + target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + desc='2d', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + constructor_args_fn=lambda: (torch.rand(3),), + cpp_constructor_args='torch::nn::NLLLossOptions().weight(torch::rand(3))', + input_size=(2, 3, 5, 5), + target=torch.rand(2, 5, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['NLLLossNd'](i, t, weight=get_weight(m)), + desc='2d_weights', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + constructor_args=(None, None, 1), + cpp_constructor_args='torch::nn::NLLLossOptions().weight({}).ignore_index(1)', + input_size=(2, 3, 5, 5), + target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['NLLLossNd'](i, t, ignore_index=1), + desc='2d_ignore_index', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + input_size=(2, 3, 5, 5, 2, 2), + target_fn=lambda: torch.rand(2, 5, 5, 2, 2).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + desc='higher_dim', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='NLLLoss', + input_size=(2, 3, 5), + target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + desc='dim_is_3', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + input_size=(2, 3, 5, 5), + target_fn=lambda: torch.rand(2, 
5, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + desc='2d', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + constructor_args_fn=lambda: (torch.rand(3),), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(3))', + input_size=(2, 3, 5, 5), + target=torch.rand(2, 5, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, weight=get_weight(m)), + desc='2d_weights', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + constructor_args=(None, None, 1), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight({}).ignore_index(1)', + input_size=(2, 3, 5, 5), + target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, ignore_index=1), + desc='2d_ignore_index', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + input_size=(2, 3, 5, 5, 2, 2), + target_fn=lambda: torch.rand(2, 5, 5, 2, 2).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + desc='higher_dim', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + input_size=(2, 3, 5), + target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + desc='dim_is_3', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + input_size=(5, 3), + target_fn=lambda: torch.rand(5, 3).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + desc='2d_prob_target', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + input_size=(5, 3, 4), + target_fn=lambda: torch.rand(5, 3, 4).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + desc='3d_prob_target', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + input_size=(5, 3, 4, 2), + target_fn=lambda: torch.rand(5, 3, 4, 2).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m)), + check_sum_reduction=True, + desc='4d_prob_target', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_2d_prob_target_smoothing_sum_reduction', + constructor=lambda *args, **kwargs: nn.CrossEntropyLoss(reduction='sum', + label_smoothing=0.15), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum)', + input_size=(5, 3), + target_fn=lambda: torch.rand(5, 3).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_2d_prob_target_smoothing', + constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15), + 
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15)', + input_size=(5, 3), + target_fn=lambda: torch.rand(5, 3).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_2d_prob_target_smoothing_weight', + constructor_args_fn=lambda: (torch.rand(3).abs(),), + constructor=lambda weight: nn.CrossEntropyLoss(weight, label_smoothing=0.15), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).weight(torch::rand(3).abs())', + input_size=(5, 3), + target_fn=lambda: torch.rand(5, 3).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m), label_smoothing=0.15), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_3d_prob_target_smoothing_sum_reduction', + constructor=lambda *args: nn.CrossEntropyLoss(reduction='sum', + label_smoothing=0.15), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum)', + input_size=(5, 3, 4), + target_fn=lambda: torch.rand(5, 3, 4).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_3d_prob_target_smoothing', + constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15)', + input_size=(5, 3, 4), + target_fn=lambda: torch.rand(5, 3, 4).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_3d_indices_target_smoothing', + constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15)', + input_size=(2, 3, 5), + target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_3d_indices_target_smoothing_ignore_index', + constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15, ignore_index=1), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).ignore_index(1)', + input_size=(2, 3, 5), + target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15, ignore_index=1), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_3d_indices_target_smoothing_sum_reduction', + constructor=lambda *args: nn.CrossEntropyLoss(reduction='sum', label_smoothing=0.15), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum)', + input_size=(2, 3, 5), + target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15), + check_bfloat16=False, + 
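# NOTE (illustrative sketch, not part of the vendored test file): the
# label-smoothing entries around here rely on CrossEntropyLoss accepting
# either class indices or class-probability targets (same shape as the input,
# normalized over dim=1), optionally combined with label_smoothing and a sum
# reduction. A minimal standalone call with arbitrary sizes:
import torch
import torch.nn as nn

logits = torch.randn(5, 3, dtype=torch.double)
prob_target = torch.rand(5, 3, dtype=torch.double).softmax(dim=1)
ce = nn.CrossEntropyLoss(reduction='sum', label_smoothing=0.15)
print(ce(logits, prob_target))  # scalar loss summed over the 5 samples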
default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_3d_indices_target_smoothing_sum_reduction_ignore_index', + constructor=lambda *args: nn.CrossEntropyLoss(reduction='sum', label_smoothing=0.15, + ignore_index=1), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum).ignore_index(1)', + input_size=(2, 3, 5), + target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15, ignore_index=1), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_2d_indices_target_smoothing', + constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15)', + input_size=(15, 10), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_2d_indices_target_smoothing_sum_reduction', + constructor=lambda *args: nn.CrossEntropyLoss(reduction='sum', label_smoothing=0.15), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).reduction(torch::kSum)', + input_size=(15, 10), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_2d_indices_target_smoothing_ignore_index', + constructor=lambda *args: nn.CrossEntropyLoss(label_smoothing=0.15, ignore_index=3), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).ignore_index(3)', + input_size=(15, 10), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), label_smoothing=0.15, ignore_index=3), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + fullname='CrossEntropyLoss_2d_indices_target_smoothing_weight', + constructor_args_fn=lambda: (torch.rand(10).abs(),), + constructor=lambda weight: nn.CrossEntropyLoss(weight, label_smoothing=0.15), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().label_smoothing(0.15).weight(torch::rand(10).abs())', + input_size=(15, 10), + target_fn=lambda: torch.empty(15).uniform_().mul(10).floor().long(), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m), label_smoothing=0.15), + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + constructor_args_fn=lambda: (torch.rand(3),), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(3))', + input_size=(5, 3), + target_fn=lambda: torch.rand(5, 3).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m)), + check_sum_reduction=True, + desc='2d_prob_target_weights', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + constructor_args_fn=lambda: (torch.rand(3),), + 
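# NOTE (illustrative sketch, not part of the vendored test file): the default
# PoissonNLLLoss entry a little further down (log_input=True, full=False) uses
# the reference mean(exp(i) - t * i); a standalone check of that identity,
# with arbitrarily chosen sizes:
import torch
import torch.nn as nn

i = torch.randn(2, 3, 4, 5, dtype=torch.double)
t = torch.randn(2, 3, 4, 5, dtype=torch.double).floor_().abs_()
torch.testing.assert_close(nn.PoissonNLLLoss()(i, t), (i.exp() - t * i).mean())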
cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(3))', + input_size=(5, 3, 4), + target_fn=lambda: torch.rand(5, 3, 4).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m)), + check_sum_reduction=True, + desc='3d_prob_target_weights', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='CrossEntropyLoss', + constructor_args_fn=lambda: (torch.rand(3),), + cpp_constructor_args='torch::nn::CrossEntropyLossOptions().weight(torch::rand(3))', + input_size=(5, 3, 4, 2), + target_fn=lambda: torch.rand(5, 3, 4, 2).softmax(dim=1), + reference_fn=lambda i, t, m: + loss_reference_fns['CrossEntropyLoss'](i, t, reduction=get_reduction(m), weight=get_weight(m)), + check_sum_reduction=True, + desc='4d_prob_target_weights', + check_bfloat16=False, + default_dtype=torch.double, + ), + dict( + module_name='PoissonNLLLoss', # Default is log_input=True, full=False + input_size=(2, 3, 4, 5), + target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(), + reference_fn=lambda i, t, _: (i.exp() - t.mul(i)).mean(), + desc='no_full_loss', + default_dtype=torch.double, + ), + dict( + module_name='PoissonNLLLoss', + constructor_args=(False, False), # log_input=False, full=False + cpp_constructor_args='torch::nn::PoissonNLLLossOptions().log_input(false).full(false)', + input_fn=lambda: torch.randn(2, 3, 4, 5).abs_().add_(0.001), + target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(), + reference_fn=lambda i, t, _: (i - t.mul((i + 1e-8).log())).mean(), + desc='no_full_loss_no_log_input', + default_dtype=torch.double, + ), + dict( + module_name='PoissonNLLLoss', + constructor_args=(True, True), # log_input=True, full=True + cpp_constructor_args='torch::nn::PoissonNLLLossOptions().log_input(true).full(true)', + input_size=(2, 3, 4, 5), + target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(), + reference_fn=lambda i, t, _: + (i.exp() - t.mul(i) + (t.mul(t.log()) - t + 0.5 * (2. * pi * t).log()).masked_fill(t <= 1, 0)).mean(), + desc='full_loss', + default_dtype=torch.double, + ), + dict( + module_name='PoissonNLLLoss', + constructor_args=(False, True), # log_input=False, full=True + cpp_constructor_args='torch::nn::PoissonNLLLossOptions().log_input(false).full(true)', + input_fn=lambda: torch.randn(2, 3, 4, 5).abs_().add_(0.001), + target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(), + reference_fn=lambda i, t, _: ( + i - t.mul((i + 1e-8).log()) + (t.mul(t.log()) - t + 0.5 * (2. * pi * t).log()).masked_fill(t <= 1, 0) + ).mean(), + desc='full_loss_no_log_input', + default_dtype=torch.double, + ), + dict( + module_name='L1Loss', + input_size=(), + target_fn=lambda: torch.randn((), requires_grad=True), + reference_fn=lambda i, t, _: 1. 
/ i.numel() * (i - t).abs().sum(), + desc='scalar', + check_complex=True, + default_dtype=torch.double, + ), + dict( + module_name='KLDivLoss', + input_fn=lambda: torch.rand(()).log(), + target_fn=lambda: torch.rand(()), + reference_fn=lambda i, t, m: + kldivloss_reference(i, t, get_reduction(m)), + check_sum_reduction=True, + desc='scalar', + default_dtype=torch.double, + ), + dict( + module_name='KLDivLoss', + constructor=wraps(nn.KLDivLoss)(partial(nn.KLDivLoss, log_target=True)), + cpp_constructor_args='torch::nn::KLDivLossOptions().log_target(true)', + input_fn=lambda: torch.rand(()).log(), + target_fn=lambda: torch.rand(()).log(), + reference_fn=lambda i, t, m: + kldivloss_log_target_reference(i, t, get_reduction(m)), + check_sum_reduction=True, + desc='scalar_log_target', + default_dtype=torch.double, + ), + dict( + module_name='MSELoss', + input_fn=lambda: torch.rand((), dtype=torch.double), + target_fn=lambda: torch.randn((), requires_grad=True, dtype=torch.double), + reference_fn=lambda i, t, m: ((i - t).abs().pow(2).sum() / + (i.numel() if get_reduction(m) == 'mean' else 1)), + check_sum_reduction=True, + desc='scalar', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='MSELoss', + input_fn=lambda: torch.ones(5, 68, 64, 64, dtype=torch.float) / 10, + target_fn=lambda: torch.zeros(5, 68, 64, 64, dtype=torch.float), + reference_fn=lambda i, t, m: ((i - t).abs().pow(2).sum() / + (i.numel() if get_reduction(m) == 'mean' else 1)), + check_forward_only=True, + desc='prec', + check_bfloat16=True, + ), + dict( + module_name='BCELoss', + constructor_args_fn=lambda: (torch.rand(()),), + cpp_constructor_args='torch::nn::BCELossOptions().weight(torch::rand({}))', + input_fn=lambda: torch.rand(()).clamp_(1e-2, 1 - 1e-2), + target_fn=lambda: torch.rand(()).gt(0).to(torch.get_default_dtype()), + reference_fn=lambda i, t, m: -((t * i.log() + (1 - t) * (1 - i).log()) * get_weight(m)).sum() / + (i.numel() if get_reduction(m) == 'mean' else 1), + desc='scalar_weights', + check_bfloat16=True, + default_dtype=torch.double, + ), + dict( + module_name='HingeEmbeddingLoss', + constructor_args=(0.5,), + cpp_constructor_args='torch::nn::HingeEmbeddingLossOptions().margin(0.5)', + input_size=(), + target_fn=lambda: torch.randn(()).gt(0).to(torch.get_default_dtype()).mul_(2).sub(1), + desc='scalar_margin', + check_sum_reduction=True, + default_dtype=torch.double, + ), + dict( + module_name='SmoothL1Loss', + input_size=(), + target_fn=lambda: torch.randn((), requires_grad=True), + check_sum_reduction=True, + reference_fn=lambda i, t, m, b=1.0: + smoothl1loss_reference(i, t, reduction=get_reduction(m), beta=b), + desc='scalar', + default_dtype=torch.double, + ), + dict( + module_name='MultiLabelSoftMarginLoss', + constructor_args=(torch.rand(10),), + cpp_constructor_args='torch::nn::MultiLabelSoftMarginLossOptions().weight(torch::rand(10))', + input_fn=lambda: torch.randn(5, 10), + target_fn=lambda: torch.rand(5, 10).mul(2).floor(), + reference_fn=lambda i, t, m: -((t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * get_weight(m)).sum() / + (i.numel() if get_reduction(m) == 'mean' else i.size(1) if get_reduction(m) == 'sum' else 1), + desc='weights', + check_sum_reduction=True, + check_gradgrad=False, + default_dtype=torch.double, + ), + dict( + module_name='CTCLoss', + constructor_args=(14,), # blank=14 + extra_args=([50, 50, 50], [30, 25, 20]), # input_lengths, target_lengths + input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2), + target_fn=lambda: torch.randint(0, 
14, (3, 30), dtype=torch.long), + reference_fn=lambda i, t, il, tl, m: + ctcloss_reference(i, t, il, tl, blank=14, reduction=get_reduction(m)), + desc='lengths_intlists', + check_forward_only=True, + check_sum_reduction=True, + check_gradgrad=False, + check_half=False, + # `CTCLoss` in C++ frontend doesn't accept integer list for `input_lengths` or `target_lengths` + test_cpp_api_parity=False, + check_jit=False, + default_dtype=torch.double, + ), + dict( + module_name='CTCLoss', + constructor_args=(14,), # blank=14 + cpp_constructor_args='torch::nn::CTCLossOptions().blank(14)', + extra_args=(torch.tensor([50, 50, 50]), torch.tensor([30, 25, 20])), # input_lengths, target_lengths + input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2), + target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long), + reference_fn=lambda i, t, il, tl, m: + ctcloss_reference(i, t, il, tl, blank=14, reduction=get_reduction(m)), + desc='lengths_tensors', + check_forward_only=True, + check_sum_reduction=True, + check_gradgrad=False, + check_half=False, + default_dtype=torch.double, + ), + # Test is flaky + # See https://github.com/pytorch/pytorch/issues/29380. + # dict( + # module_name='CTCLoss', + # desc='1d_target', + # constructor_args=(14,), # blank=14 + # extra_args=([50, 50, 50], [30, 25, 20]), # input_lengths, target_lengths + # input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2), + # target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long), + # reference_fn=lambda i, t, il, tl, m: + # ctcloss_reference(i, t, il, tl, blank=14, reduction=get_reduction(m)), + # check_sum_reduction=True, + # check_gradgrad=False, + # check_half=False, + # ), + dict( + module_name='CTCLoss', + desc='2d_int_target_lengths_intlists', + constructor_args=(0,), # blank=0 + extra_args=([50, 50, 50], [30, 25, 20]), # input_lengths, target_lengths + input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2), + target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int), + reference_fn=lambda i, t, il, tl, m: + ctcloss_reference(i, t, il, tl, blank=0, reduction=get_reduction(m)), + check_forward_only=True, + check_sum_reduction=True, + check_gradgrad=False, + check_half=False, + # `CTCLoss` in C++ frontend doesn't accept integer list for `input_lengths` or `target_lengths` + test_cpp_api_parity=False, + check_jit=False, + default_dtype=torch.double, + ), + dict( + module_name='CTCLoss', + desc='2d_int_target_lengths_tensors', + constructor_args=(0,), # blank=0 + cpp_constructor_args='torch::nn::CTCLossOptions().blank(0)', + extra_args=(torch.tensor([50, 50, 50]), torch.tensor([30, 25, 20])), # input_lengths, target_lengths + input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2), + target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int), + reference_fn=lambda i, t, il, tl, m: + ctcloss_reference(i, t, il, tl, blank=0, reduction=get_reduction(m)), + check_forward_only=True, + check_sum_reduction=True, + check_gradgrad=False, + check_half=False, + default_dtype=torch.double, + ), + dict( + module_name='CTCLoss', + desc='2d_lengths_tensors', + constructor_args=(0,), # blank=0 + cpp_constructor_args='torch::nn::CTCLossOptions().blank(0)', + extra_args=(torch.tensor([50, 50, 50]), torch.tensor([30, 25, 20])), # input_lengths, target_lengths + input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2), + target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int), + reference_fn=lambda i, t, il, tl, m: + ctcloss_reference(i, t, il, tl, blank=0, reduction=get_reduction(m)), + check_forward_only=True, + 
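# NOTE (illustrative sketch, not part of the vendored test file): the CTCLoss
# entries above and below all use the same call shape -- log-probabilities of
# shape (T, N, C) = (50, 3, 15), integer targets of shape (N, S) = (3, 30),
# and per-sample input/target lengths passed either as Python int lists or as
# tensors (as noted above, the C++ parity test only supports the tensor form).
# A minimal standalone call:
import torch
import torch.nn as nn

log_probs = torch.randn(50, 3, 15, dtype=torch.double).log_softmax(2)
targets = torch.randint(1, 15, (3, 30), dtype=torch.long)  # labels avoid blank=0
input_lengths = torch.tensor([50, 50, 50])
target_lengths = torch.tensor([30, 25, 20])
print(nn.CTCLoss(blank=0)(log_probs, targets, input_lengths, target_lengths))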
check_sum_reduction=True, + check_gradgrad=False, + check_half=False, + default_dtype=torch.double, + ), +] + + +def single_batch_reference_criterion_fn(*args): + """Reference function for criterion supporting no batch dimensions. + + The criterion is passed the input and target in batched form with a single item. + The output is squeezed to compare with the no-batch input. + """ + criterion = args[-1] + + def unsqueeze_inp(inp): + if isinstance(inp, (list, tuple)): + return [t.unsqueeze(0) for t in inp] + return inp.unsqueeze(0) + + def flatten(xs): + result = [] + if isinstance(xs, (list, tuple)): + for x in xs: + result.extend(flatten(x)) + else: + result.append(xs) + return result + + single_batch_input_args = flatten([unsqueeze_inp(input) for input in args[:-1]]) + + output = criterion(*single_batch_input_args) + reduction = get_reduction(criterion) + + if reduction == 'none': + return output.squeeze(0) + # reduction is 'sum' or 'mean' which results in a scalar + return output + + +# Check that regression criterion work with no batch dimensions +regression_criterion_no_batch = [ + 'L1Loss', 'MSELoss', 'PoissonNLLLoss', 'HuberLoss', 'SmoothL1Loss' +] +reductions = ['none', 'mean', 'sum'] +for name, reduction in product(regression_criterion_no_batch, reductions): + regression_test_info = dict( + fullname=f"{name}_no_batch_dim_{reduction}", + constructor=lambda *args, name=name: getattr(nn, name)(reduction=reduction), + input_size=(3, ), + target_size=(3, ), + reference_fn=single_batch_reference_criterion_fn, + test_cpp_api_parity=False, + default_dtype=torch.double, + ) + criterion_tests.append(regression_test_info) + + +for reduction in reductions: + regression_test_info = dict( + fullname=f"KLDivLoss_no_batch_dim_{reduction}", + constructor=lambda: nn.KLDivLoss(reduction=reduction), + input_fn=lambda: torch.rand((3,)).log(), + target_fn=lambda: torch.rand((3,)), + reference_fn=single_batch_reference_criterion_fn, + test_cpp_api_parity=False, + default_dtype=torch.double, + ) + criterion_tests.append(regression_test_info) + + +# Check that classification criterion work with no batch dimensions +# List of tuples of (name, input_fn, target_fn) +classification_criterion_no_batch = [ + ( + 'BCELoss', + lambda: torch.sigmoid(torch.randn(9, dtype=torch.double)), + lambda: torch.randn(9, dtype=torch.double).gt(0).to(torch.double) + ), + ('BCEWithLogitsLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.randn(9, dtype=torch.double)), + ('HingeEmbeddingLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.tensor([-1, 1, 1] * 3)), + ('MultiLabelMarginLoss', lambda: torch.randn(4, dtype=torch.double), lambda: torch.tensor([3, 0, -1, 1])), + ('SoftMarginLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.tensor([-1, 1, 1] * 3)), + ('NLLLoss', lambda: F.log_softmax(torch.randn(3, dtype=torch.double), dim=0), lambda: torch.tensor(1)), + ( + 'CosineEmbeddingLoss', + lambda: (torch.randn(9, dtype=torch.double), torch.randn(9, dtype=torch.double)), + lambda: torch.tensor(1, dtype=torch.double) + ), + # For MarginRankingLoss, input_fn : (x1, x2) and target_fn : target + ('MarginRankingLoss', lambda: (torch.randn(()), torch.randn(())), lambda: torch.randn(()).sign()), + # For TripletMarginLoss, input_fn : (anchor, positive) and target_fn : negative + ( + 'TripletMarginLoss', + lambda: (torch.randn(9, dtype=torch.double), torch.randn(9, dtype=torch.double)), + lambda: torch.randn(9, dtype=torch.double) + ), + ('MultiLabelSoftMarginLoss', lambda: torch.randn(9, 
dtype=torch.double), lambda: torch.randn(9)), +] +classification_criterion_no_batch_extra_info: Dict[str, dict] = { + 'MultiLabelMarginLoss': {'check_gradgrad': False}, +} +# TODO : Fix these discrepancies +classification_cpp_parity = { + 'BCELoss': False, + 'BCEWithLogitsLoss': False, + 'HingeEmbeddingLoss': False, + 'NLLLoss': False, + 'SoftMarginLoss': False, +} +reductions = ['none', 'mean', 'sum'] +for (name, input_fn, target_fn), reduction in product(classification_criterion_no_batch, + reductions): + classification_test_info = dict( + fullname=f"{name}_no_batch_dim_{reduction}", + constructor=lambda *args, name=name: getattr(nn, name)(reduction=reduction), + input_fn=lambda f=input_fn: f(), + target_fn=lambda f=target_fn: f(), + reference_fn=single_batch_reference_criterion_fn, + test_cpp_api_parity=True, + has_parity=classification_cpp_parity.get(name, True) + ) + extra_info = classification_criterion_no_batch_extra_info.get(name, {}) + classification_test_info.update(extra_info) + criterion_tests.append(classification_test_info) + + +class NNTestCase(TestCase): + + # _forward is defined in classes inheriting from NNTestCase + @abstractmethod + def _forward(self, *args, **kwargs): + raise NotImplementedError + + @abstractmethod + def _get_parameters(self, module: nn.Module) -> Tuple[List[nn.Parameter], List[nn.Parameter]]: + raise NotImplementedError + + @abstractmethod + def _zero_grad_parameters(self, module: nn.Module) -> None: + raise NotImplementedError + + @abstractmethod + def _backward(self, module: nn.Module, + input: _TensorOrTensors, output: torch.Tensor, + grad_output: Union[torch.Tensor, Sequence[torch.Tensor]], + create_graph: bool = False): + raise NotImplementedError + + def _jacobian(self, input, num_out): + if isinstance(input, tuple): + return tuple(self._jacobian(elem, num_out) for elem in input) + elif isinstance(input, list): + return [self._jacobian(elem, num_out) for elem in input] + else: + return torch.zeros(input.nelement(), num_out) + + def _flatten_tensors(self, x): + if isinstance(x, torch.Tensor): + if x.is_sparse: + return x.to_dense().view(-1) + else: + return x.view(-1) + else: + return tuple(self._flatten_tensors(a) for a in x) + + def _zero_grad_input(self, input): + if isinstance(input, torch.Tensor): + if input.requires_grad and input.grad is not None: + input.grad.zero_() + input.grad.detach_() + else: + for i in input: + self._zero_grad_input(i) + + def _analytical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True): + output = self._forward(module, input) + output_size = output.nelement() + + if jacobian_input: + jacobian_inp = self._jacobian(input, output_size) + flat_jacobian_input = list(_iter_tensors(jacobian_inp)) + + if jacobian_parameters: + num_param = sum(p.numel() for p in self._get_parameters(module)[0]) + jacobian_param = torch.zeros(num_param, output_size) + + for i in range(output_size): + param, d_param = self._get_parameters(module) + # make non grad zeros + d_param = [torch.zeros_like(p) if d is None else d for (p, d) in zip(param, d_param)] + + d_out = torch.zeros_like(output) + flat_d_out = d_out.view(-1) + flat_d_out[i] = 1 + + if jacobian_parameters: + self._zero_grad_parameters(module) + # Tensors will accumulate gradient from multiple steps + if jacobian_input: + self._zero_grad_input(input) + d_input = self._backward(module, input, output, d_out) + + if jacobian_input: + for jacobian_x, d_x in zip(flat_jacobian_input, _iter_tensors(d_input)): + jacobian_x[:, i] = 
d_x.contiguous().view(-1) + if jacobian_parameters: + jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0) + + res: Tuple[torch.Tensor, ...] = tuple() + if jacobian_input: + res += jacobian_inp, + if jacobian_parameters: + res += jacobian_param, + + return res + + def _numerical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True): + def fw(*input): + return self._forward(module, input).detach() + + res: Tuple[torch.Tensor, ...] = tuple() + if jacobian_input: + res += _get_numerical_jacobian(fw, input, eps=1e-6), + if jacobian_parameters: + param, _ = self._get_parameters(module) + to_cat = [] + for p in param: + jacobian = _get_numerical_jacobian(fw, input, target=p, eps=1e-6) + # get_numerical_jacobian returns a list of tuples but we require a tensor + to_cat.append(jacobian[0][0]) + res += (torch.cat(to_cat, 0),) + return res + + def check_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True): + jacobian_parameters = bool(self._get_parameters(module)[0]) + analytical = self._analytical_jacobian(module, input, jacobian_input, jacobian_parameters) + numerical = self._numerical_jacobian(module, input, jacobian_input, jacobian_parameters) + analytical_t = list(_iter_tensors(analytical)) + numerical_t = list(_iter_tensors(numerical)) + + differences = [] + for a, n in zip(analytical_t, numerical_t): + if a.numel() != 0: + differences.append(a.add(n, alpha=-1).abs().max()) + # TODO: compare structure (ensure analytic jacobian has correct shape) + if len(differences) > 0: + self.assertLessEqual(max(differences), PRECISION) # type: ignore[type-var] + + +class TestBase: + + _required_arg_names = {'constructor_args', 'input', 'extra_args'} + + def __init__(self, constructor, desc='', reference_fn=None, fullname=None, **kwargs): + self.desc = desc + self.fullname = fullname + self.constructor = constructor + self.reference_fn = reference_fn + for name in self._required_arg_names: + if name not in kwargs and name + '_fn' not in kwargs and name + '_size' not in kwargs: + if name in {'constructor_args', 'extra_args'}: + kwargs[name] = tuple() + else: + raise ValueError("{}: Specify {} by a value, a function to generate it, or it's size!" 
+ .format(self.get_name(), name)) + self._extra_kwargs = kwargs + self._arg_cache = {} + + def get_name(self): + if self.fullname is not None: + return 'test_' + self.fullname + + test_name = 'test_' + self.constructor.__name__ + if self.desc: + test_name += '_' + self.desc + return test_name + + def _unpack(self, value): + if isinstance(value, torch.Tensor): + return value + elif is_iterable(value): + return type(value)(self._unpack(v) for v in value) + else: + return value + + @property + def constructor_args(self): + return self._get_arg('constructor_args', True) + + @property + def extra_args(self): + return self._get_arg('extra_args', True) + + def _get_arg(self, name, unpack): + assert name in self._required_arg_names + + if name not in self._arg_cache: + fn_name = name + '_fn' + size_name = name + '_size' + + if name in self._extra_kwargs: + self._arg_cache[name] = self._extra_kwargs[name] + elif fn_name in self._extra_kwargs: + self._arg_cache[name] = self._extra_kwargs[fn_name]() + else: + assert size_name in self._extra_kwargs, \ + f"Missing `{name}`, `{size_name}` or `{fn_name}` for {self.get_name()}" + + def map_tensor_sizes(sizes): + if isinstance(sizes, list): + return [map_tensor_sizes(s) for s in sizes] + elif isinstance(sizes, torch.Tensor): + return sizes.double() + else: + return torch.randn(sizes) + + self._arg_cache[name] = map_tensor_sizes(self._extra_kwargs[size_name]) + + return self._unpack(self._arg_cache[name]) if unpack else self._arg_cache[name] + + def _get_input(self, unpack=True): + return self._get_arg('input', unpack) + + def __call__(self, test_case): + raise NotImplementedError + + +class ModuleTest(TestBase): + + @abstractmethod + def _do_test(self, test_case: Any, module: nn.Module, input: Any) -> Any: + raise NotImplementedError + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.jacobian_input = kwargs.get('jacobian_input', True) + self.should_test_cuda = kwargs.get('test_cuda', True) + self.should_test_pickle = kwargs.get('pickle', True) + self.check_gradgrad = kwargs.get('check_gradgrad', True) + self.FIXME_no_cuda_gradgrad_comparison = \ + kwargs.get('FIXME_no_cuda_gradgrad_comparison', False) + self.precision = kwargs.get('precision', 2e-4) + self.check_forward_only = kwargs.get('check_forward_only', False) + self.default_dtype = kwargs.get('default_dtype', None) + if self.default_dtype is None: + self.default_dtype = torch.get_default_dtype() + + def __call__(self, test_case): + with set_default_dtype(self.default_dtype): + module = self.constructor(*self.constructor_args) + input = self._get_input() + + if self.reference_fn is not None: + out = test_case._forward(module, input) + ref_input = deepcopy(input) + ref_module = deepcopy(module) + expected_out = self.reference_fn(ref_input, test_case._get_parameters(module)[0], ref_module) + test_case.assertEqual(out, expected_out, exact_dtype=False) + if self.check_forward_only: + return + self.test_noncontig(test_case, module, input) + + if self.should_test_pickle: + # TODO: do this with in-memory files as soon as torch.save will support it + with tempfile.TemporaryFile() as f: + test_case._forward(module, input) + torch.save(module, f) + f.seek(0) + module_copy = torch.load(f) + test_case.assertEqual(test_case._forward(module, input), test_case._forward(module_copy, input)) + + self._do_test(test_case, module, input) + + def noncontiguize(self, obj): + if isinstance(obj, list): + return [self.noncontiguize(o) for o in obj] + elif isinstance(obj, tuple): + return 
tuple(self.noncontiguize(o) for o in obj) + tensor = obj + ndim = tensor.dim() + # Always making only the last dimension noncontiguous is easy to hide + # bugs because .view(-1) will still work. So try to find a dim with size + # > 1 and make that non-contiguous, i.e., stack + select on the + # dimension directly after that. + dim = ndim + for d in range(ndim): + if tensor.size(d) > 1: + dim = d + 1 + break + noncontig = torch.stack([torch.empty_like(tensor), tensor], dim).select(dim, 1).detach() + assert noncontig.numel() == 1 or noncontig.numel() == 0 or not noncontig.is_contiguous() + noncontig.requires_grad = tensor.requires_grad + return noncontig + + def test_noncontig(self, test_case, module, input): + # check no scalars, can't make non-contig + if isinstance(input, torch.Tensor) and input.dim() == 0: + return + if any(i.dim() == 0 for i in input if isinstance(i, torch.Tensor)): + return + + test_case._zero_grad_parameters(module) + test_case._zero_grad_input(input) + with freeze_rng_state(): + output = test_case._forward(module, input) + if getattr(module, "return_indices", False): + output = output[0] + grad_output = output.new(output.shape).normal_() + output = output.clone() + d_input = deepcopy(test_case._backward(module, input, output, grad_output)) + d_param = deepcopy(test_case._get_parameters(module)[1]) + + nc_input = self.noncontiguize(input) + nc_grad_output = self.noncontiguize(grad_output) + for contig_i, contig_g in product((True, False), repeat=2): + i = input if contig_i else nc_input + # Some ops, e.g., nn.Flatten, return gradient that shares + # storage with the grad_output. Hence we copy here. + go = deepcopy(grad_output if contig_g else nc_grad_output) + test_case._zero_grad_parameters(module) + test_case._zero_grad_input(i) + with freeze_rng_state(): + out = test_case._forward(module, i) + if getattr(module, "return_indices", False): + out = out[0] + grad = test_case._backward(module, i, out, go) + + test_case.assertEqual(out, output) + test_case.assertEqual(grad, d_input, atol=1e-4, rtol=0) + test_case.assertEqual(test_case._get_parameters(module)[1], d_param) + + def test_cuda(self, test_case): + if not TEST_CUDA or not self.should_test_cuda: + raise unittest.SkipTest('Excluded from CUDA tests') + + with set_default_dtype(self.default_dtype): + cpu_input = self._get_input() + + type_map = {torch.double: torch.float} + cpu_input_tuple = cpu_input if isinstance(cpu_input, tuple) else (cpu_input,) + + is_any_input_complex = any(isinstance(t, torch.Tensor) and t.dtype.is_complex for t in cpu_input_tuple) + + gpu_input_tuple = to_gpu(cpu_input_tuple, type_map=type_map) + + cpu_module = self.constructor(*self.constructor_args) + gpu_module = self.constructor(*self.constructor_args).float().cuda() + cpu_param = test_case._get_parameters(cpu_module) + gpu_param = test_case._get_parameters(gpu_module) + for cpu_p, gpu_p in zip(cpu_param[0], gpu_param[0]): + gpu_p.data.copy_(cpu_p) + + test_case._zero_grad_input(cpu_input_tuple) + test_case._zero_grad_input(gpu_input_tuple) + test_case._zero_grad_parameters(cpu_module) + test_case._zero_grad_parameters(gpu_module) + cpu_output = test_case._forward(cpu_module, cpu_input_tuple) + gpu_output = test_case._forward(gpu_module, gpu_input_tuple) + if getattr(cpu_module, "return_indices", False): + cpu_output = cpu_output[0] + gpu_output = gpu_output[0] + test_case.assertEqual(cpu_output, gpu_output, atol=self.precision, rtol=0, exact_dtype=False) + + # Run backwards on CPU and GPU and compare results + for _ in range(5): + 
cpu_gradOutput = cpu_output.clone().normal_() + gpu_gradOutput = cpu_gradOutput.type_as(gpu_output) + cpu_gradInput = test_case._backward(cpu_module, cpu_input_tuple, cpu_output, cpu_gradOutput) + gpu_gradInput = test_case._backward(gpu_module, gpu_input_tuple, gpu_output, gpu_gradOutput) + test_case.assertEqual(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0, exact_dtype=False) + for cpu_d_p, gpu_d_p in zip(cpu_param[1], gpu_param[1]): + test_case.assertEqual(cpu_d_p, gpu_d_p, atol=self.precision, rtol=0) + + # Run double-backwards on CPU and GPU and compare results + if self.check_gradgrad and not self.FIXME_no_cuda_gradgrad_comparison: + cpu_output = cpu_module(*cpu_input_tuple) + gpu_output = gpu_module(*gpu_input_tuple) + if getattr(cpu_module, "return_indices", False): + cpu_output = cpu_output[0] + gpu_output = gpu_output[0] + + cpu_gradOutput = torch.randn_like(cpu_output, requires_grad=True) + gpu_gradOutput = cpu_gradOutput.type_as(gpu_output).detach() + gpu_gradOutput.requires_grad = True + + cpu_gradInputs = torch.autograd.grad( + cpu_output, + cpu_input_tuple + tuple(cpu_module.parameters()), + cpu_gradOutput, + create_graph=True) + gpu_gradInputs = torch.autograd.grad( + gpu_output, + gpu_input_tuple + tuple(gpu_module.parameters()), + gpu_gradOutput, + create_graph=True) + + for cpu_d_i, gpu_d_i in zip(cpu_gradInputs, gpu_gradInputs): + test_case.assertEqual(cpu_d_i, gpu_d_i, atol=self.precision, rtol=0, exact_dtype=False) + + # We mix output into the second backwards computation so that + # torch.autograd.grad doesn't complain that some inputs + # are unreachable (which can happen if you differentiate + # only on the gradient. + if is_any_input_complex: + outputs_cpu = cpu_output.sum().abs() + sum(x.sum().abs() for x in cpu_gradInputs) + outputs_gpu = gpu_output.sum().abs() + sum(x.sum().abs() for x in gpu_gradInputs) + else: + outputs_cpu = cpu_output.sum() + sum(x.sum() for x in cpu_gradInputs) + outputs_gpu = gpu_output.sum() + sum(x.sum() for x in gpu_gradInputs) + + cpu_gg = torch.autograd.grad( + outputs_cpu, + cpu_input_tuple + (cpu_gradOutput,) + tuple(cpu_module.parameters()), + retain_graph=True) + gpu_gg = torch.autograd.grad( + outputs_gpu, + gpu_input_tuple + (gpu_gradOutput,) + tuple(gpu_module.parameters()), + retain_graph=True) + test_case.assertEqual(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0, exact_dtype=False) + for cpu_d_p, gpu_d_p in zip(cpu_gg, gpu_gg): + test_case.assertEqual(cpu_d_p, gpu_d_p, atol=self.precision, rtol=0, exact_dtype=False) + + self.test_noncontig(test_case, gpu_module, gpu_input_tuple) + + +class InputVariableMixin: + def _get_input(self): + input = TestBase._get_input(self, False) # type: ignore[arg-type] + + def map_variables(i): + if isinstance(i, torch.Tensor): + if i.is_floating_point() or i.is_complex(): + i.requires_grad = True + return i + else: + return type(i)(map_variables(elem) for elem in i) + + return map_variables(input) + + +class NewModuleTest(InputVariableMixin, ModuleTest): # type: ignore[misc] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.cudnn = kwargs.get('cudnn', False) + self.check_inplace = kwargs.get('check_inplace', False) + self.check_gradgrad = kwargs.get('check_gradgrad', True) + self.skip_double = kwargs.get('skip_double', False) + self.skip_half = kwargs.get('skip_half', False) + self.with_tf32 = kwargs.get('with_tf32', False) + self.tf32_precision = kwargs.get('tf32_precision', 0.001) + self.test_cpu = kwargs.get('test_cpu', True) + 
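# NOTE (illustrative sketch, not part of the vendored test file): the gradient
# checks built by _check_gradients below wrap the module call in a plain
# function of its tensor inputs and hand it to gradcheck/gradgradcheck (the
# harness uses wrappers from common_utils; torch.autograd's versions are used
# here for a self-contained example with an arbitrary module):
import torch
from torch.autograd import gradcheck, gradgradcheck

lin = torch.nn.Linear(3, 2).double()
x = torch.randn(4, 3, dtype=torch.double, requires_grad=True)
assert gradcheck(lambda inp: lin(inp), (x,))
assert gradgradcheck(lambda inp: lin(inp), (x,))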
self.has_sparse_gradients = kwargs.get('has_sparse_gradients', False) + self.check_batched_grad = kwargs.get('check_batched_grad', True) + self.gradcheck_fast_mode = kwargs.get('gradcheck_fast_mode', None) + self.supports_forward_ad = kwargs.get('supports_forward_ad', False) + self.supports_fwgrad_bwgrad = kwargs.get('supports_fwgrad_bwgrad', False) + + def _check_gradients(self, test_case, module, input_tuple): + params = tuple(x for x in module.parameters()) + num_inputs = len(input_tuple) + + def fn_to_gradcheck(*inputs_and_params, **kwargs): + assert not kwargs + return test_case._forward(module, inputs_and_params[:num_inputs]) + + # gradcheck doesn't support operators that take in dense inputs but + # return sparse parameters. This only happens in the case of nn.Embedding + # and nn.EmbeddingBag. Instead, we call `self.check_jacobian`, which + # is a slightly different version of gradcheck that can handle this. + if self.has_sparse_gradients: + assert num_inputs == 1 + test_input_jacobian = torch.is_floating_point(input_tuple[0]) + test_case.check_jacobian(module, input_tuple[0], test_input_jacobian) + else: + test_case.assertTrue(gradcheck(fn_to_gradcheck, input_tuple + params, + check_batched_grad=self.check_batched_grad, + fast_mode=self.gradcheck_fast_mode, + check_forward_ad=self.supports_forward_ad)) + + if self.check_gradgrad: + test_case.assertTrue(gradgradcheck(fn_to_gradcheck, input_tuple + params, + check_batched_grad=self.check_batched_grad, + fast_mode=self.gradcheck_fast_mode, + check_fwd_over_rev=self.supports_fwgrad_bwgrad)) + + def _do_test(self, test_case, module, input): + num_threads = torch.get_num_threads() + torch.set_num_threads(1) + input_tuple = input if isinstance(input, tuple) else (input,) + + self._check_gradients(test_case, module, input_tuple) + + # check if module can be printed + module.__repr__() + + if self.check_inplace: + # check if the inplace variant of the module gives the same result + # as the out-of-place + + # check_inplace doesn't support multiple input tensors, since we don't have any modules + # that modify the inputs in-place and that accept more than one input + assert len(input_tuple) == 1 + input = input_tuple[0] + + module_ip = self.constructor(*self.constructor_args, inplace=True) + + input_version = input._version + with freeze_rng_state(): + output = module(input) + test_case.assertEqual(input._version, input_version) + + input_ip = deepcopy(input) + input_ip_clone = input_ip.clone() + with freeze_rng_state(): + output_ip = module_ip(input_ip_clone) + test_case.assertNotEqual(input_ip_clone._version, input_version) + test_case.assertEqual(output, output_ip) + grad = output.data.clone().normal_() + if input.grad is not None: + with torch.no_grad(): + input.grad.zero_() + if input_ip.grad is not None: + with torch.no_grad(): + input_ip.grad.zero_() + output.backward(grad) + output_ip.backward(grad) + test_case.assertEqual(input.grad, input_ip.grad) + + def assert_module_parameters_are(tensor_type, device_id=None): + for p in module.parameters(): + test_case.assertIsInstance(p, tensor_type) + if device_id is not None: + test_case.assertEqual(p.get_device(), device_id) + + if all(isinstance(t, torch.LongTensor) for t in input_tuple) and TEST_CUDA: + # check that cuda() moves module parameters to correct GPU device, + # and that float() casts parameters correctly + input_tuple = tuple(t.cuda() for t in input_tuple) + module.float().cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: 
ignore[attr-defined] + + if torch.cuda.device_count() > 1: + input_tuple = tuple(t.cuda(1) for t in input_tuple) + module.cuda(1) + with torch.cuda.device(1): + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 1) # type: ignore[attr-defined] + else: + # check that float()/double() casters work correctly + def to_type(tensor, real, complex): + if tensor.is_complex(): + return tensor.to(complex) + elif tensor.is_floating_point(): + return tensor.to(real) + else: + return tensor + + def to_half(x): + # TODO: torch.complex32 when properly supported + return to_type(x, torch.float16, None) + + def to_single(x): + return to_type(x, torch.float32, torch.complex64) + + def to_double(x): + return to_type(x, torch.float64, torch.complex128) + + # to float + input_tuple = tuple(to_single(t) for t in input_tuple) + module.float() + module(*input_tuple) + assert_module_parameters_are(torch.FloatTensor) + + # and back to double + input_tuple = tuple(to_double(t) for t in input_tuple) + module.double() + module(*input_tuple) + assert_module_parameters_are(torch.DoubleTensor) + + if TEST_CUDA and self.should_test_cuda: + # check that cuda() moves module parameters to correct GPU device, + # and that float() casts parameters correctly + + # to GPU0 + input_tuple = tuple(to_single(t).cuda() for t in input_tuple) + module.float().cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] + + # to CPU + input_tuple = tuple(t.cpu() for t in input_tuple) + module.cpu() + module(*input_tuple) + assert_module_parameters_are(torch.FloatTensor) + + # back to GPU0 + input_tuple = tuple(t.cuda() for t in input_tuple) + module.cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] + + # test that forwards of module runs correctly without cuDNN + if self.cudnn: + with torch.backends.cudnn.flags(enabled=False): + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] + + if torch.cuda.device_count() >= 2: + # test cross-GPU transfer works + # to GPU1 + input_tuple = tuple(t.cuda(1) for t in input_tuple) + module.cuda(1) + with torch.cuda.device(1): + module(*input_tuple) + assert_module_parameters_are(torch.cuda.FloatTensor, 1) # type: ignore[attr-defined] + + if not self.skip_double: + # test double() + input_tuple = tuple(to_double(t).cuda() for t in input_tuple) + module.double().cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.DoubleTensor, 0) # type: ignore[attr-defined] + + # test half() + if not self.skip_half: + input_tuple = tuple(to_half(t).cuda() for t in input_tuple) + module.half().cuda() + module(*input_tuple) + assert_module_parameters_are(torch.cuda.HalfTensor, 0) # type: ignore[attr-defined] + torch.set_num_threads(num_threads) + + def _get_target(self): + return self._get_arg('target', False) + + @property + def constructor_args(self): + return self._get_arg('constructor_args', False) + + +class CriterionTest(InputVariableMixin, TestBase): # type: ignore[misc] + # TODO: check that criterions don't ignore grad_output + + _required_arg_names = TestBase._required_arg_names.union({'target'}) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.should_test_cuda = kwargs.get('test_cuda', True) + self.check_forward_only = kwargs.get('check_forward_only', False) + self.check_gradgrad = kwargs.get('check_gradgrad', True) + self.check_half = 
kwargs.get('check_half', True) + self.check_bfloat16 = kwargs.get('check_bfloat16', False) + self.check_complex = kwargs.get('check_complex', False) + self.test_cpu = kwargs.get('test_cpu', True) + self.with_tf32 = kwargs.get('with_tf32', True) + self.tf32_precision = kwargs.get('tf32_precision', 0.001) + self.check_batched_grad = kwargs.get('check_batched_grad', True) + self.default_dtype = kwargs.get('default_dtype', None) + if self.default_dtype is None: + self.default_dtype = torch.get_default_dtype() + + def __call__(self, test_case): + with set_default_dtype(self.default_dtype): + module = self.constructor(*self.constructor_args) + input = self._get_input() + + # Check that these methods don't raise errors + module.__repr__() + str(module) + + target = self._get_target() + + if self.reference_fn is not None: + out = test_case._forward_criterion(module, input, target, extra_args=self.extra_args) + ref_args = (deepcopy(input), deepcopy(target)) + self.extra_args + (module,) + expected_out = self.reference_fn(*ref_args) + test_case.assertEqual(out, expected_out) + + if self.check_forward_only: + return + + params = tuple(x for x in module.parameters()) + if not isinstance(input, tuple): + inputs = (input,) + params + (target,) + + def apply_fn(input, target, *params): + return module(input, target) + else: + inputs = input + params + (target,) + + def apply_fn(input1, input2, target, *params): # type: ignore[misc] + return module(input1, input2, target) + + gradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad) + + if self.check_gradgrad: + gradgradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad) + + def test_cuda(self, test_case, dtype, extra_args=None): + def convert_dtype(obj, dtype, requires_grad=False): + if isinstance(obj, torch.Tensor): + return obj.detach().to(dtype=dtype).requires_grad_(requires_grad) + elif isinstance(obj, tuple): + return tuple(convert_dtype(o, dtype, requires_grad) for o in obj) + else: + return obj + + if not TEST_CUDA or not self.should_test_cuda: + raise unittest.SkipTest('Excluded from CUDA tests') + + with set_default_dtype(self.default_dtype): + cpu_input = self._get_input() + cpu_target = self._get_target() + cpu_module = self.constructor(*self.constructor_args) + gpu_module = self.constructor(*self.constructor_args) + + # Convert input, target and module parameters to dtype + cpu_input = convert_dtype(cpu_input, dtype, True) + if cpu_target.is_floating_point() or cpu_target.is_complex(): + cpu_target = convert_dtype(cpu_target, dtype) + cpu_module.type(dtype) + gpu_module.type(dtype) + + # GPU setup + gpu_input = to_gpu(cpu_input) + gpu_target = to_gpu(cpu_target) + gpu_module.cuda() + + # torch.HalfTensor doesn't support most operations, converting back to default + if dtype in {torch.half, torch.bfloat16}: + cpu_input = self._get_input() + cpu_target = self._get_target() + # Loss modules with weights require consistent input/module weight types + cpu_module = self.constructor(*self.constructor_args) + + cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target, extra_args=extra_args) + gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target, extra_args=extra_args) + # dtype used to be able to be None, so set precision in this way instead of a precision map + test_case.assertEqual(cpu_output, gpu_output, + atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0, exact_dtype=False) + + cpu_gradInput = test_case._backward_criterion( + cpu_module, cpu_input, 
cpu_output, cpu_target, extra_args=extra_args) + gpu_gradInput = test_case._backward_criterion( + gpu_module, gpu_input, gpu_output, gpu_target, extra_args=extra_args) + # dtype used to be able to be None, so set precision in this way instead of a precision map + test_case.assertEqual(cpu_gradInput, gpu_gradInput, + atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0, exact_dtype=False) + + def _get_target(self): + return self._get_arg('target', False) + + @property + def constructor_args(self): + return self._get_arg('constructor_args', False) + + @property + def extra_args(self): + return self._get_arg('extra_args', False) + + +def _test_bfloat16_ops(test_case, op, device, inp_dims=(), prec=1e-2, scale_factor=None): + # fp32 compute + input1 = torch.randn(inp_dims, dtype=torch.float32, device=device, requires_grad=True) + if scale_factor is not None: + input1 = (torch.rand(inp_dims, dtype=torch.bfloat16, device=device) * scale_factor).float().requires_grad_() + out1 = op(input1) + grad_input1 = torch.randn_like(out1, device=device) + out1.backward(grad_input1) + + # bfloat16 compute + op_bfp16 = op.bfloat16() + input2 = input1.detach().bfloat16().requires_grad_() + grad_input2 = grad_input1.bfloat16() + out2 = op_bfp16(input2) + out2.backward(grad_input2) + + test_case.assertEqual(out1, out2, atol=prec, rtol=prec, exact_dtype=False) + test_case.assertEqual(input1.grad.data, input2.grad.data, atol=prec, rtol=prec, exact_dtype=False) + +def _test_module_empty_input(test_case, module, inp, check_size=True, inference=False): + if not inference: + inp.requires_grad_(True) + out = module(inp) + if not inference: + gO = torch.rand_like(out) + out.backward(gO) + if check_size: + test_case.assertEqual(out.size(), inp.size()) + if not inference: + for p in module.parameters(): + if p.requires_grad: + test_case.assertEqual(p.grad, torch.zeros_like(p.grad)) + test_case.assertEqual(inp.grad, torch.zeros_like(inp)) + + +def _create_basic_net(): + class Layer(nn.Module): + def __init__(self): + super().__init__() + self.layer_dummy_param = nn.Parameter(torch.empty(3, 5)) + self.register_buffer('layer_dummy_buf', torch.zeros(1, 3, 3, 7)) + + class Net(nn.Module): + def __init__(self): + super().__init__() + self.l1 = Layer() + self.dummy_param = nn.Parameter(torch.empty(3, 5)) + self.register_buffer('dummy_buf', torch.zeros(7, 3, 3, 1)) + + l = Layer() + n = Net() + s = nn.Sequential(n, n) + + return l, n, s diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f687eb854576a32991dee083c6cdeb6018c1092 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3944ab2587c3e7042ac7782829ef0d3238b178d3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46e365a309c259167c237ba367ba320324fa3d82 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network1.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network1.py new file mode 100644 index 0000000000000000000000000000000000000000..aed9cb404d6f2649b5141cf6683db30c29198fce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network1.py @@ -0,0 +1,8 @@ +import torch.nn as nn + + +class Net(nn.Module): + + def __init__(self): + super().__init__() + self.linear = nn.Linear(10, 20) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network2.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network2.py new file mode 100644 index 0000000000000000000000000000000000000000..e6022a1e7570f5333c0400bfd5221197a2f4ea83 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/data/network2.py @@ -0,0 +1,9 @@ +import torch.nn as nn + + +class Net(nn.Module): + + def __init__(self): + super().__init__() + self.linear = nn.Linear(10, 20) + self.relu = nn.ReLU() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69a79d17214cf303f65acbc69e6104fc9235df13 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a90ad3f75f1dd279f661bcf095a8d57e41ab9ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..034d0135bdcdb24b250d2e4ec3bad138a3e5e3d5 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d8938e5c7fb06e5a8f9cb7d62e69fc90c4d3075 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e80a2ea3f648c4e62e51e47c27cfdb50a79052ad Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08848ca63d91906a7fc9204d140ff549f96e42d1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c26f4949a5052e3a1e965fa04cc4600a33506cb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22845bb99fc6db5e2a596db84464be5fe5509e07 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a1ab459ad5b24039cc13d0f958cfc924322d398 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/pipe_with_ddp_test.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98d96967a82780d3efc8a4918d2625ba66671b65 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dc44958e73062e89d7abec321a818174f702e4d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41f74e3e3f5b007faa5744330e357dd9d6309337 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0ca11a7fbd724ee9541a5e00b77ddee143b9cbdb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py @@ -0,0 +1,96 @@ +import sys +from functools import wraps, partial + +import torch +import torch.distributed as dist +from torch.distributed import rpc +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + TEST_SKIPS, + tp_transports, +) + +TEST_GPU_NUM = 4 + +class ShardedTensorTestBase(MultiProcessTestCase): + @property + def world_size(self): + return TEST_GPU_NUM + + def init_pg(self, backend="nccl"): + if backend not in ["nccl", "gloo", "mpi"]: + raise RuntimeError(f"Backend {backend} not supported!") + + dist.init_process_group( + backend=backend, + world_size=self.world_size, + rank=self.rank, + init_method=f"file://{self.file_name}", + ) + + # set device for nccl pg for collectives + if backend == "nccl": + torch.cuda.set_device(self.rank) + + + def init_rpc(self): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports()) + rpc_backend_options.init_method = f"file://{self.file_name}" + for rank in range(self.world_size): + rpc_backend_options.set_device_map( + f"worker{rank}", {rank: self.rank, self.rank: rank} + ) + + rpc.init_rpc( + name="worker%d" % self.rank, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + def init_comms(self, init_rpc=True, backend="nccl"): + if init_rpc: + 
self.init_rpc() + self.init_pg(backend=backend) + + def destroy_comms(self, destroy_rpc=True): + # Wait for all ranks to reach here before starting shutdown. + dist.barrier() + + if destroy_rpc: + rpc.shutdown() + dist.destroy_process_group() + + def setUp(self) -> None: + super().setUp() + self._spawn_processes() + + def assert_sharded_tensor_equal(self, st1, st2): + st1_local_shards = st1.local_shards() + st2_local_shards = st2.local_shards() + self.assertEqual(len(st1_local_shards), len(st2_local_shards)) + for i, st1_local_shard in enumerate(st1_local_shards): + self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor) + self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata) + + self.assertEqual(st1.metadata(), st2.metadata()) + self.assertEqual(st1.sharding_spec(), st2.sharding_spec()) + self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards())) + +# wrapper to initialize comms (processgroup + rpc) +def with_comms(func=None, init_rpc=True, backend="nccl"): + if func is None: + return partial( + with_comms, + init_rpc=init_rpc, + backend=backend, + ) + + @wraps(func) + def wrapper(self, *args, **kwargs): + if backend == "nccl" and torch.cuda.device_count() < self.world_size: + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + self.init_comms(init_rpc=init_rpc, backend=backend) + func(self, *args, **kwargs) + self.destroy_comms(destroy_rpc=init_rpc) + return wrapper diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9b5f62cdb73770a93d76f2554fb7f0300143e75 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0a67c24bf783d82dcb9612394de9779c8efe2ea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25ac49d7c3d1534269ff35f6fb484d1bc4821c74 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py new file mode 100644 index 
0000000000000000000000000000000000000000..6508a5d88e5fbf1621e96d2cc19a368f5ef0e8f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py @@ -0,0 +1,134 @@ +import builtins + +import torch +from torch.distributed._shard.sharding_spec import ( + ChunkShardingSpec, + EnumerableShardingSpec, + ShardMetadata, +) +from torch.distributed._shard.sharding_spec._internals import ( + get_chunked_dim_size, + get_split_size, +) + + +def generate_chunk_sharding_specs_for_test(sharding_dim): + return [ + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", + ], + ), + # Test different ordering. (Case 1) + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:2/cuda:2", + "rank:3/cuda:3", + "rank:0/cuda:0", + "rank:1/cuda:1", + ], + ), + # Test different ordering. (Case 2) + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:3/cuda:3", + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + ], + ), + ] + + +def generate_enumerable_sharding_specs_for_test(): + return [ + EnumerableShardingSpec( + [ + ShardMetadata( + shard_offsets=[0, 0], + shard_sizes=[5, 5], + placement="rank:0/cuda:0", + ), + ShardMetadata( + shard_offsets=[5, 0], + shard_sizes=[5, 5], + placement="rank:1/cuda:1", + ), + ShardMetadata( + shard_offsets=[0, 5], + shard_sizes=[5, 5], + placement="rank:2/cuda:2", + ), + ShardMetadata( + shard_offsets=[5, 5], + shard_sizes=[5, 5], + placement="rank:3/cuda:3", + ), + ] + ) + ] + + +def generate_local_weight_sharding_params_for_test( + local_weight, sharded_dim, gpu_num, spec, rank +): + """ + Shard the local weight based the given spec, so we can compare against + the one from sharded tensor. + + Args: + local_weight: weight matrix to be sharded. + sharded_dim: The dimension which we shard on. + gpu_num: number of ranks. + spec: sharding spec. + rank: # of cuda process. + + Returns: + start_pos: start position of sharded weight on the given rank. + chunk_size: chunk size of sharded weight on the given rank. + """ + sharding_dim_size = local_weight.size(sharded_dim) + split_size = get_split_size(sharding_dim_size, gpu_num) + current_offsets = 0 + start_pos = current_offsets + for idx, placement in enumerate(spec.placements): + chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx) + if rank == placement.rank(): + start_pos = current_offsets + break + current_offsets += chunk_size + return start_pos, chunk_size + + +def clone_module_parameter(module, param_name): + """ + Clone a parameter from a given existing module. + + Args: + module (:class:`torch.nn.Module`): Module whose parameter needs to be cloned. + param_name (str): Name of the parameter of ``module`` that needs to be cloned. + + Returns: cloned tensor as :class:`torch.nn.Parameter`. 
+ """ + tensor = getattr(module, param_name) + return torch.nn.Parameter(tensor.detach().clone()) + +def gen_binary_op_func(python_op, inplace=False): + src_lines = ['def f(lhs, rhs):'] + if "torch" in python_op: + src_lines.append(f' return {python_op}(lhs, rhs)\n') + elif inplace: + src_lines.append(f' lhs {python_op}= rhs\n return lhs\n') + else: + src_lines.append(f' return lhs {python_op} rhs\n') + + code_str = '\n'.join(src_lines) + g = {'torch': torch} + builtins.exec(code_str, g) + return g["f"] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py new file mode 100644 index 0000000000000000000000000000000000000000..58ce3c996fa030e4c6a1c8e354d6382115f1ad87 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py @@ -0,0 +1,64 @@ +import copy +import random +import torch +from torch.distributed._shard import sharded_tensor + +from torch.distributed._shard.sharding_spec import ( + ChunkShardingSpec, +) + +PLACEMENTS = [ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", +] + +DEFAULT_GPU_NUM = 4 + + +def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0): + spec_list = [] + for i in range(len(sharding_dims)): + random.Random(seed + i).shuffle(PLACEMENTS) + spec_list.append( + ChunkShardingSpec( + dim=sharding_dims[i], + placements=copy.deepcopy(PLACEMENTS), + ) + ) + return spec_list + +class MyShardedModel2(torch.nn.Module): + def __init__( + self, + spec=None, + group=None, + init_rrefs=True + ) -> None: + super().__init__() + if spec is not None: + self.sharded_tensor2 = sharded_tensor.rand( + spec, 10, 20, process_group=group, init_rrefs=init_rrefs + ) + else: + self.sharded_tensor2 = None + self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2)) + + +class MyShardedModel1(torch.nn.Module): + def __init__( + self, + spec=None, + group=None, + init_rrefs=True + ) -> None: + super().__init__() + if spec is not None: + self.sharded_tensor1 = sharded_tensor.rand( + spec, 10, 20, process_group=group, init_rrefs=init_rrefs + ) + else: + self.sharded_tensor1 = None + self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2)) + self.submodule = MyShardedModel2(spec, group, init_rrefs) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..786cb9ed42eac06bc106c3c5881a8f85fb098e55 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py @@ -0,0 +1,40 @@ +import torch +import torch.nn as nn + +from torch.distributed._shard.sharded_tensor import ShardedTensor + + +class SimpleMegatronLM(nn.Module): + def __init__(self, linear_size, rank=None, dtype=torch.float32): + super().__init__() + self.fc1 = nn.Linear(*linear_size[0], dtype=dtype) + self.gelu = nn.GELU() + self.fc2 = nn.Linear(*linear_size[1], dtype=dtype) + if rank is not None: + self.fc1.cuda(rank) + self.fc2.cuda(rank) + + def forward(self, inp): + return self.fc2(self.gelu(self.fc1(inp))) + + def get_weights(self): + if isinstance(self.fc1.weight, ShardedTensor): + weight1 = self.fc1.weight.local_tensor() + else: + weight1 = self.fc1.weight 
+ + if isinstance(self.fc2.weight, ShardedTensor): + weight2 = self.fc2.weight.local_tensor() + else: + weight2 = self.fc2.weight + + return (weight1, weight2) + + def get_biases(self): + return (self.fc1.bias, self.fc2.bias) + + def get_weight_grads(self): + return (self.fc1.weight.grad, self.fc2.weight.grad) + + def get_bias_grads(self): + return (self.fc1.bias.grad, self.fc2.bias.grad) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2729911e854bfc511b77c711b09abcd807d1b4ce Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17db284de1e4b7c272901382494e0b4d6783e76e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py new file mode 100644 index 0000000000000000000000000000000000000000..b3471f48f1c88ba7a7d18d2e8d9564e4cbc836f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py @@ -0,0 +1,358 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +import itertools +import sys +from functools import wraps +from typing import ( + Any, + Callable, + Iterator, + Tuple, + Dict, + List, + Sequence, + TypeVar, + cast, +) + +import torch +import torch.distributed as dist + +from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + MultiThreadedTestCase, + TEST_SKIPS, + skip_if_lt_x_gpu, +) + +from torch.distributed._tensor import ( + DeviceMesh, + Shard, + Replicate, + distribute_tensor, +) +from torch.distributed._tensor.placement_types import Placement + +DEVICE_TYPE = "cuda" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else "cpu" +PG_BACKEND = "nccl" if DEVICE_TYPE == "cuda" else "gloo" + +NUM_DEVICES = 4 + +# We use this as a proxy for "multiple GPUs exist" +if torch.cuda.is_available() and torch.cuda.device_count() > 1: + # when we actually have multiple GPUs, relax the requirement to smaller counts. 
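+ # e.g. with only 2 visible GPUs this lowers NUM_DEVICES (and the default world_size of the test bases below) to 2.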
+ NUM_DEVICES = min(NUM_DEVICES, torch.cuda.device_count()) + +T = TypeVar("T") + + +class MLPModule(torch.nn.Module): + def __init__(self, device): + super().__init__() + torch.manual_seed(5) + self.net1 = torch.nn.Linear(10, 16, device=device) + self.relu = torch.nn.ReLU() + self.net2 = torch.nn.Linear(16, 10, device=device) + + def forward(self, x): + return self.net2(self.relu(self.net1(x))) + + def reset_parameters(self): + self.net1.reset_parameters() + self.net2.reset_parameters() + + +def skip_unless_torch_gpu(method: T) -> T: + """ + Test decorator which skips the test unless there's a GPU available to torch. + + >>> # xdoctest: +SKIP + >>> @skip_unless_torch_gpu + >>> def test_some_method(self) -> None: + >>> ... + """ + # The builtin @skip_if_no_gpu relies on os.environ['WORLD_SIZE'] being set. + return cast(T, skip_if_lt_x_gpu(NUM_DEVICES)(method)) + + +class DTensorTestBase(MultiProcessTestCase): + @property + def world_size(self) -> int: + return NUM_DEVICES + + @property + def backend(self) -> str: + return PG_BACKEND + + def build_device_mesh(self) -> DeviceMesh: + return DeviceMesh(DEVICE_TYPE, list(range(NUM_DEVICES))) + + def init_pg(self) -> None: + if "nccl" in self.backend and torch.cuda.device_count() < self.world_size: + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + + if self.backend not in ["nccl", "gloo", "mpi", "cpu:gloo,cuda:nccl"]: + raise RuntimeError(f"Backend {self.backend} not supported!") + + dist.init_process_group( + backend=self.backend, + world_size=self.world_size, + rank=self.rank, # pyre-ignore[16] + init_method=f"file://{self.file_name}", # pyre-ignore[16] + ) + + # set device for nccl pg for collectives + if "nccl" in self.backend: + torch.cuda.set_device(self.rank) + + def destroy_pg(self) -> None: + # Wait for all ranks to reach here before starting shutdown. + # FIXME dist.barrier deadlocks with multiple threads and NCCL: https://github.com/pytorch/pytorch/issues/95895 + # dist.all_reduce(torch.zeros((1,), device="cuda" if torch.cuda.is_available() else "cpu")) + # FIXME can't use the above all_reduce as it causes hangs on bionic and focal. It hangs: + # test_dtensor.py -- DTensorMeshTest.test_dtensor_device_mesh_device_conversion + dist.barrier() + dist.destroy_process_group() + + def setUp(self) -> None: + super().setUp() + self._spawn_processes() + + # pyre-ignore[2]: + def _test_op(self, mesh: DeviceMesh, op_call, *args, **kwargs) -> None: + out = op_call(*args, **kwargs) + dtc = DTensorConverter(mesh, args, kwargs) + for d_args, d_kwargs in dtc: + # pyre can't find assertTrue anymore? 
+ self.assertEqual(dtc.successful(), True) + d_out = op_call(*d_args, **d_kwargs) + self.assertEqual(d_out.full_tensor(), out) + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + +TestFunc = Callable[[object], object] + +# wrapper to initialize comms (processgroup) +def with_comms(func: TestFunc) -> TestFunc: + assert func is not None + + @wraps(func) # pyre-ignore[6] + def wrapper( + self, *args: Tuple[object], **kwargs: Dict[str, Any] # type: ignore[misc] + ) -> None: + # if backend not specified, and cuda available, then use nccl, else gloo + if torch.cuda.is_available() and torch.cuda.device_count() >= self.world_size: + self.device_type = "cuda" + else: + self.device_type = "cpu" + + self.init_pg() + func(self, *args, **kwargs) # type: ignore[misc] + self.destroy_pg() + + return wrapper + + +def run_subtests( + cls_inst, + subtest_config: Dict[str, List[Any]], + test_fn: Callable, + *test_args, + **test_kwargs: Any, +): + """ + Runs a test function given by ``test_fn`` as a subtest according to the + configurations specified by ``subtest_config``. This amortizes the + costly setup overhead (including process spawn and initializing the + process group) over the subtests. + + Args: + subtest_config (Dict[str, List[Any]]): A mapping from subtest + keyword argument name to a list of its possible values. + test_fn (Callable): A callable that runs the actual test. + test_args: Positional arguments to pass to ``test_fn``. + test_kwargs: Keyword arguments to pass to ``test_fn``. + """ + # Convert the config mapping to a list to have a fixed order + subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items()) + subtest_config_keys: List[str] = [item[0] for item in subtest_config_items] + subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items] + for values in itertools.product(*subtest_config_values): + # Map keyword to chosen value + subtest_kwargs = dict(zip(subtest_config_keys, values)) + with cls_inst.subTest(**subtest_kwargs): + test_fn(*test_args, **test_kwargs, **subtest_kwargs) + dist.barrier() + + +class DTensorOpTestBase(MultiThreadedTestCase): + @property + def world_size(self) -> int: + return NUM_DEVICES + + @property + def device_type(self) -> str: + return DEVICE_TYPE + + def build_device_mesh(self): + return DeviceMesh(self.device_type, list(range(self.world_size))) + + def setUp(self) -> None: + super().setUp() + self._spawn_threads() + + +# This is a class for converting args/kwargs of an op into distributed args/kwargs +class DTensorConverter: + def __init__( + self, + mesh: DeviceMesh, + args: Tuple[object, ...], + kwargs: Dict[str, object], + ) -> None: + self.hit = 0 + self.miss = 0 + self.mesh = mesh + self.args = args + self.kwargs = kwargs + flatten_args, flatten_args_spec = tree_flatten(args) + flatten_kwargs, flatten_kwargs_spec = tree_flatten(kwargs) + + self.flatten_args: List[object] = flatten_args + self.flatten_args_spec: TreeSpec = flatten_args_spec + self.flatten_kwargs: List[object] = flatten_kwargs + self.flatten_kwargs_spec: TreeSpec = flatten_kwargs_spec + + choices_for_args = [] + for arg in self.flatten_args: + if isinstance(arg, torch.Tensor): + choices_for_args.append(self.gen_sharding_choices_for_arg(arg)) + + for arg in self.flatten_kwargs: + if isinstance(arg, torch.Tensor): + choices_for_args.append(self.gen_sharding_choices_for_arg(arg)) + + self.sharding_combs: Iterator[Sequence[Placement]] = iter( + itertools.product(*choices_for_args) + ) + + def 
successful(self) -> bool: + return self.hit > 0 and self.miss == 0 + + def is_supported_tensor(self, t: torch.Tensor) -> bool: + # TODO: dist tensor need to support quantized and sparse + # tensors, quantized tensor might be relatively easy, but + # sparse tensor have special layouts that we need to possibly + # deal with, until we are clear about them, we don't officially + # support them. + return not any( + [ + t.is_sparse_csr, + t.is_sparse, + t.is_mkldnn, + t.is_quantized, + t.is_nested, + torch._is_functional_tensor(t), + t.is_neg(), + t.is_conj(), + t.device.type in ("lazy", "meta"), + # We need a way to test if a tensor is batched but there + # is no official APi to do it + # torch._C._is_batched(t), + ] + ) + + def gen_sharding_choices_for_arg( + self, arg: torch.Tensor + ) -> Sequence[Placement]: + mesh_size = self.mesh.size() + sharding_choices: List[Placement] = [Replicate()] + # c10d collective does not support bool tensor + # for bool tensor we treat it as replicated + if arg.dtype != torch.bool: + # only generating choices with: replicate, or sharding + # evenly on a dimension that could be sharded + sharding_choices = sharding_choices + [ + Shard(i) + for i, s in enumerate(arg.shape) + if s > 1 and s % mesh_size == 0 + ] + # TODO: add multi mesh choices + # all_choices = itertools.product( + # *(self.mesh.ndim * [sharding_choices]) + # ) + return sharding_choices + + def __iter__(self) -> "DTensorConverter": + return self + + def __next__(self) -> Tuple[Tuple[object, ...], Dict[str, object]]: + try: + next_sharding_choices = next(self.sharding_combs) + idx = 0 + + new_args: List[object] = [] + for arg in self.flatten_args: + if isinstance(arg, torch.Tensor): + new_args.append( + self.to_dist_tensor( + arg, self.mesh, [next_sharding_choices[idx]] + ) + ) + idx += 1 + else: + new_args.append(arg) + + new_kwargs: List[object] = [] + for arg in self.flatten_kwargs: + if isinstance(arg, torch.Tensor): + new_kwargs.append( + self.to_dist_tensor( + arg, self.mesh, [next_sharding_choices[idx]] + ) + ) + idx += 1 + else: + new_kwargs.append(arg) + + return ( + tree_unflatten(new_args, self.flatten_args_spec), + tree_unflatten(new_kwargs, self.flatten_kwargs_spec), + ) + except StopIteration as e: + raise StopIteration from e + + def to_dist_tensor( + self, t: torch.Tensor, mesh: DeviceMesh, placements: List[Placement] + ) -> torch.Tensor: + if type(t) is torch.Tensor or type(t) is torch.nn.Parameter: + if self.is_supported_tensor(t): + self.hit += 1 + if t.ndim == 0: + # scalar tensor by default will be replicated + r = distribute_tensor(t, mesh, [Replicate()] * mesh.ndim) + else: + # distribute non-scalar tensors + r = distribute_tensor(t, mesh, placements) + if type(t) is torch.nn.Parameter: + r = torch.nn.Parameter( # type: ignore[assignment] + r, requires_grad=r.requires_grad + ) + return r + else: + self.miss += 1 + return t + elif torch.overrides.is_tensor_like(t): + # Blindly converting tensor subclasses to dist tensor can cause + # unpredictable problems, we explicitly disable this conversion + # for now (i.e. we don't support DTensor holding tensor subclass + # until there's a strong reason later). 
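+ # Such tensors are returned unchanged and counted as a miss, so successful() reports False for the op being exercised.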
+ self.miss += 1 + return t + else: + raise RuntimeError( + f"Trying to convert to DTensor, but got {type(t)}" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..69dbfd3f72bb64715120b638aeaf80cf755c0721 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +import os +import shutil +import tempfile +from functools import wraps +from typing import Any, Callable, Dict, Optional, Tuple + +import torch.distributed as dist + + +def with_temp_dir( + func: Optional[Callable] = None, +) -> Optional[Callable]: + """ + Wrapper to initialize temp directory for distributed checkpoint. + """ + assert func is not None + + @wraps(func) + def wrapper(self, *args: Tuple[object], **kwargs: Dict[str, Any]) -> None: + # Only create temp_dir when rank is 0 + if dist.get_rank() == 0: + temp_dir = tempfile.mkdtemp() + print(f"Using temp directory: {temp_dir}") + else: + temp_dir = "" + object_list = [temp_dir] + + # Broadcast temp_dir to all the other ranks + os.sync() + dist.broadcast_object_list(object_list) + self.temp_dir = object_list[0] + os.sync() + + try: + func(self, *args, **kwargs) + finally: + if dist.get_rank() == 0: + shutil.rmtree(self.temp_dir, ignore_errors=True) + + return wrapper diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..66ad5df7887032d529a53a17aec9022c8ff5415a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py @@ -0,0 +1,111 @@ +# Owner(s): ["oncall: distributed"] + +import copy +from itertools import chain +from typing import Any, Dict + +import torch +import torch.nn as nn + +from torch.distributed._sharded_tensor import ShardedTensor +from torch.distributed._tensor import DTensor +from torch.distributed.checkpoint._state_dict_utils import _gather_state_dict +from torch.distributed.checkpoint.state_dict import ( + PG, + set_state_dict, + STATE, + StateDictOptions, +) + + +class VerifyStateDictMixin: + def _compare_tensor(self, orig_tensor, dist_tensor): + if isinstance(dist_tensor, (DTensor, ShardedTensor)): + dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey") + self.assertTrue(isinstance(dist_tensor, torch.Tensor)) + self.assertTrue(torch.allclose(orig_tensor, dist_tensor)) + + def _verify_msd( + self, + msd: Dict[str, Any], + dist_msd: Dict[str, Any], + options: StateDictOptions = StateDictOptions(), + ) -> None: + if not options.ignore_frozen_params: + self.assertEqual(len(msd), len(dist_msd)) + for fqn, param in msd.items(): + dist_param = dist_msd.get(fqn, None) + if not options.ignore_frozen_params: + self.assertIsNotNone(dist_param) + self._compare_tensor(param, dist_param) + elif dist_param is None: + self.assertFalse(param.requires_grad) + + def _verify_osd( + self, + model: nn.Module, + optim: torch.optim.Optimizer, + osd: Dict[str, Any], + dist_osd: Dict[str, Any], + ) -> None: + params = list(chain.from_iterable(g["params"] for g in optim.param_groups)) + param_pid_mapping = dict(zip(params, 
range(len(params)))) + fqn_pid_mapping = {} + for fqn, param in model.named_parameters(): + pid = param_pid_mapping[param] + fqn_pid_mapping[fqn] = pid + fqn_pid_mapping[pid] = fqn + # Check optimizer_state_dict state + + self.assertEqual(len(osd[STATE]), len(dist_osd[STATE])) + for pid, states in osd[STATE].items(): + fqn = fqn_pid_mapping[pid] + dist_states = dist_osd[STATE].get(fqn, None) + self.assertIsNotNone(dist_states, fqn) + self.assertEqual(len(states), len(dist_states)) + for key, state in states.items(): + dist_state = states.get(key, None) + self.assertIsNotNone(dist_state) + self._compare_tensor(state, dist_state) + + # Check optimizer_state_dict param_group + old_dist_osd_pg = dist_osd[PG] + if len(osd[PG]) != len(dist_osd[PG]): + self.assertTrue(len(dist_osd[PG]) > len(osd[PG])) + new_pg = copy.deepcopy(dist_osd[PG][0]) + new_pg["params"] = [] + for dist_group in dist_osd[PG]: + new_pg["params"].extend(dist_group["params"]) + dist_osd[PG] = [new_pg] + + self.assertEqual(len(osd[PG]), len(dist_osd[PG])) + for group, dist_group in zip(osd[PG], dist_osd[PG]): + self.assertEqual(len(group), len(dist_group)) + for key, value in group.items(): + # Below doesn't work because param_groups can have None + # values. + # dist_value = dist_group.get(key, None) + # self.assertIsNotNone(dist_value, (dist_group, group)) + dist_value = dist_group[key] + if key == "params": + fqns = [fqn_pid_mapping[pid] for pid in value] + self.assertEqual(sorted(fqns), sorted(dist_value)) + else: + self.assertEqual(value, dist_value) + dist_osd[PG] = old_dist_osd_pg + + def _verify_osd_by_load( + self, + model: nn.Module, + optim: torch.optim.Optimizer, + new_optim: torch.optim.Optimizer, + dist_osd: Dict[str, Any], + ) -> None: + new_dist_osd = _gather_state_dict(dist_osd) + set_state_dict( + model, + optimizers=new_optim, + model_state_dict={}, + optim_state_dict=new_dist_osd, + ) + self.assertEqual(optim.state_dict(), new_optim.state_dict()) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b201f77f2171069dbe5054263cab6cfba1933d00 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py @@ -0,0 +1,733 @@ +#!/usr/bin/env python3 + +import contextlib +import enum +import logging +import os +import threading +from typing import NamedTuple + +import torch +import torch.distributed as dist +import torch.distributed.autograd as dist_autograd +import torch.nn as nn +from torch.distributed import rpc +from torch.distributed.nn import RemoteModule +from torch.nn.parallel import DistributedDataParallel +from torch.testing._internal.common_distributed import ( + requires_gloo, + requires_nccl, + skip_if_lt_x_gpu, + skip_if_rocm, +) +from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +NUM_EM_ROW = 2 +D_SPARSE = 3 +D_DENSE = 2 +D_HID = 3 +D_OUT = 1 +NUM_TRAINERS = 4 +# Trainers + the master + the remote worker +WORLD_SIZE = NUM_TRAINERS + 2 +TRAINER_RANKS = list(range(NUM_TRAINERS)) +REMOTE_WORKER_RANK = TRAINER_RANKS[-1] + 1 +MASTER_RANK = REMOTE_WORKER_RANK + 1 + + +class DdpMode(enum.Enum): + # Don't apply DDP + NONE = enum.auto() + # Apply DDP to the top level 
nn.Module + OUTSIDE = enum.auto() + # Embed DDP inside the top level nn.Module + INSIDE = enum.auto() + + +def init_logger(): + logger = logging.getLogger(__name__) + level = logging.DEBUG if "debug" in os.environ else logging.INFO + logger.setLevel(level) + console = logging.StreamHandler() + formatter = logging.Formatter( + "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s" + ) + console.setFormatter(formatter) + console.setLevel(level) + # add the handlers to the logger + logger.addHandler(console) + logger.propagate = False + return logger + + +gLogger = init_logger() + + +class FeatureSet(NamedTuple): + """ A feature set has 2 types of features""" + + dense_features: torch.Tensor + sparse_features: torch.LongTensor + values: torch.Tensor + + +def _call_method(method, rref, *args, **kwargs): + return method(rref.local_value(), *args, **kwargs) + + +def _remote_method(method, rref, *args, **kwargs): + args_tup = tuple([method, rref] + list(args)) + return rpc.rpc_sync(rref.owner(), _call_method, args=args_tup, kwargs=kwargs) + + +def _remote_method_async(method, rref, *args, **kwargs): + args_tup = tuple([method, rref] + list(args)) + return rpc.rpc_async(rref.owner(), _call_method, args=args_tup, kwargs=kwargs) + + +class RemoteEM(nn.Module): + def __init__(self, num_embeddings: int, embedding_dim: int): + gLogger.info("Initing RemoteEM with %s %s", num_embeddings, embedding_dim) + super().__init__() + init_em = [0.5] * embedding_dim + self.em = nn.EmbeddingBag( + num_embeddings, + embedding_dim, + _weight=torch.tensor([init_em] * num_embeddings), + ) + + def forward(self, input: torch.Tensor): + gLogger.debug("Running RemoteEM.forward() on: %s", input) + return self.em(input, offsets=torch.LongTensor(range(input.shape[0]))) + + +# Return a linear module with predefined parameters. +def getLinear(d_in, d_out): + l = nn.Linear(d_in, d_out, bias=False) + w = torch.ones((d_out, d_in)) + w[0][0] = -1 + w.requires_grad_() + l.weight.data = w + return l + + +class RemoteNet(nn.Module): + def __init__(self, d_in: int, d_out: int): + gLogger.info("Initing RemoteNet with %s %s", d_in, d_out) + super().__init__() + self.fc = getLinear(d_in, d_out) + self.relu = nn.ReLU() + + def forward(self, input: torch.Tensor): + gLogger.debug("Running RemoteNet.forward() on: %s", input) + return self.relu(self.fc(input)) + + +class HybridModel(nn.Module): + def __init__( + self, + remote_em_rref: rpc.RRef, + remote_net_rref: rpc.RRef, + process_group_for_ddp: dist.ProcessGroup = None, + ): + super().__init__() + self.remote_em_rref = remote_em_rref + self.remote_net_rref = remote_net_rref + self.fc1 = getLinear(D_DENSE, D_DENSE) + self.fc2 = getLinear(D_HID, D_OUT) + + self.non_ddp_params = tuple(self.fc1.parameters()) + tuple( + self.fc2.parameters() + ) + self.ddp_params = () + + if process_group_for_ddp is not None: + self.non_ddp_params, self.ddp_params = ( + tuple(self.fc1.parameters()), + tuple(self.fc2.parameters()), + ) + gLogger.info("Use DDP for the second local net.") + self.fc2 = DistributedDataParallel( + self.fc2, check_reduction=True, process_group=process_group_for_ddp + ) + + gLogger.info( + "HybridModel has %s groups of parameters.", len(list(self.parameters())) + ) + + def forward(self, input: FeatureSet): + gLogger.debug("Running HybridModel.forward on %s", input) + sparse = _remote_method( + RemoteEM.forward, self.remote_em_rref, input.sparse_features + ) + # The same size of mini batch. 
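+ # (RemoteEM returns one embedding row per example, so its batch dimension must match the dense features.)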
+ assert sparse.shape[0] == input.dense_features.shape[0] + dense = self.fc1(input.dense_features) + x = torch.cat((dense, sparse), 1) + gLogger.debug("Concatenated feature: %s", x) + x = _remote_method(RemoteNet.forward, self.remote_net_rref, x) + return self.fc2(x) + + +class Trainer: + def __init__( + self, + remote_em_rref: rpc.RRef, + remote_net_rref: rpc.RRef, + ddp_mode: DdpMode, + rank: int, + ): + self.rank = rank + self.trainer_group = ( + dist.new_group(TRAINER_RANKS) + if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE) + else None + ) + self.remote_em_rref = remote_em_rref + self.remote_net_rref = remote_net_rref + self.hybrid_module = HybridModel( + self.remote_em_rref, + self.remote_net_rref, + self.trainer_group if ddp_mode in (DdpMode.INSIDE,) else None, + ) + self.ddp_params, self.non_ddp_params = ( + self.hybrid_module.ddp_params, + self.hybrid_module.non_ddp_params, + ) + if ddp_mode == DdpMode.OUTSIDE: + gLogger.info("Wrapping the whole hybrid module into DDP.") + self.ddp_params += self.non_ddp_params + self.non_ddp_params = () + self.hybrid_module = DistributedDataParallel( + self.hybrid_module, + check_reduction=True, + process_group=self.trainer_group, + ) + gLogger.info( + "Succeeded in creating a HybridModel instance with " + "%s ddp params and %s other local params.", + len(self.ddp_params), len(self.non_ddp_params) + ) + + def destroy_pg(self): + if self.trainer_group: + dist.destroy_process_group(self.trainer_group) + + def train_batch( + self, + mini_batch: FeatureSet, + trainer_has_less_inputs: bool, + simulate_uneven_inputs: bool, + ): + grads_dict = None + + if not simulate_uneven_inputs: + input_batches = [mini_batch] + else: + # Split into microbatches, and trim to simulate uneven inputs. + dense_features = mini_batch.dense_features + sparse_features = mini_batch.sparse_features + values = mini_batch.values + + dense_microbatch = torch.split(dense_features, 2) + sparse_microbatch = torch.split(sparse_features, 2) + values_microbatch = torch.split(values, 2) + batches = [] + for d, s, v in zip(dense_microbatch, sparse_microbatch, values_microbatch): + feature_set = FeatureSet(dense_features=d, sparse_features=s, values=v) + batches.append(feature_set) + + if trainer_has_less_inputs: + input_batches = batches[: len(batches) // 2] + gLogger.info( + "Trainer reduced input patches from %s " + "to %s to simulate uneven inputs.", + len(batches), len(input_batches) + ) + else: + input_batches = batches + + with self.hybrid_module.join() if simulate_uneven_inputs else contextlib.nullcontext(): + for b in input_batches: + with dist_autograd.context() as context_id: + output = self.hybrid_module.forward(b) + loss = (output * mini_batch.values).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + gLogger.info( + "Loss is %s for mini batch: %s. " + "Grads dict has %s entries: %s", loss, mini_batch, len(grads_dict), grads_dict + ) + return ( + tuple(grads_dict[param] for param in self.ddp_params), + tuple(grads_dict[param] for param in self.non_ddp_params), + ) + + +def get_training_examples(): + n = 16 + training_examples = FeatureSet( + dense_features=torch.zeros((n, D_DENSE)), + sparse_features=torch.zeros(n, dtype=torch.long), + values=torch.zeros(n), + ) + idx = 0 + # Every example has another one that has exactly the same features but an + # opposite value. Therefore, their grads cancel each other in all-reduce. 
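+ # Illustrative: for such a pair d/dw [out(x) * (+1) + out(x) * (-1)] = 0, so the gradient DDP averages across trainers vanishes.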
+ for value in (-1, 1): + for x in (-1.0 * value, 1.0 * value): + for y in (1.0 * value, -1.0 * value): + for z in (0, 1): + training_examples.dense_features[idx, :] = torch.tensor((x, y)) + training_examples.sparse_features[idx] = z + training_examples.values[idx] = value + idx += 1 + + # Split the examples among NUM_TRAINERS trainers + assert 0 == (n % NUM_TRAINERS) + examples_per_trainer = int(n / NUM_TRAINERS) + return [ + FeatureSet( + dense_features=training_examples.dense_features[ + start : start + examples_per_trainer, : + ], + sparse_features=training_examples.sparse_features[ + start : start + examples_per_trainer + ], + values=training_examples.values[start : start + examples_per_trainer], + ) + for start in range(0, n, examples_per_trainer) + ] + + +shutdown_signal = threading.Condition() + + +def set_shutdown_signal(): + global shutdown_signal + with shutdown_signal: + shutdown_signal.notify() + + +class DdpUnderDistAutogradTest(RpcAgentTestFixture): + @property + def world_size(self) -> int: + return WORLD_SIZE + + def remote_worker_name(self) -> str: + # The name has to be consistent with that in 'dist_init' decorator. + return f"worker{REMOTE_WORKER_RANK}" + + def trainer_name(self, rank): + # The name has to be consistent with that in 'dist_init' decorator. + return f"worker{rank}" + + def _remote_worker_process(self, ddp_mode): + gLogger.info("The remote worker is running.") + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE): + # new_group needs to be called on ranks. + dist.new_group(TRAINER_RANKS) + + global shutdown_signal + with shutdown_signal: + shutdown_signal.wait() + gLogger.info("Exiting remote worker.") + dist.destroy_process_group() + + def _trainer_process(self, rank: int): + gLogger.info("Running the trainer #%s...", rank) + gLogger.info( + "Initing trainer process group by trainer #%s with ranks %s", rank, TRAINER_RANKS + ) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + gLogger.info("Waiting for shutdown signal on trainer #%s...", rank) + + global shutdown_signal + with shutdown_signal: + shutdown_signal.wait() + gLogger.info("Exiting the trainer #%s...", rank) + dist.destroy_process_group() + + def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool): + gLogger.info("Running the master process...") + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + remote_em_rref = rpc.remote( + self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE) + ) + remote_net_rref = rpc.remote( + self.remote_worker_name(), RemoteNet, args=(D_DENSE + D_SPARSE, D_HID) + ) + gLogger.info("Created remote rrefs on master") + self.do_test_on_master( + ddp_mode, simulate_uneven_inputs, remote_em_rref, remote_net_rref + ) + + def do_test_on_master( + self, + ddp_mode: DdpMode, + simulate_uneven_inputs: bool, + remote_em_rref: rpc.RRef, + remote_net_rref: rpc.RRef, + ): + if simulate_uneven_inputs: + gLogger.info( + "Running DDP + RPC test with simulating uneven inputs across trainers." 
+ ) + + trainer_rrefs = [] + for rank in TRAINER_RANKS: + trainer = self.trainer_name(rank) + trainer_rrefs.append( + rpc.remote( + trainer, + Trainer, + args=(remote_em_rref, remote_net_rref, ddp_mode, rank), + ) + ) + + if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE): + # new_group needs to be called on ranks. + dist.new_group(TRAINER_RANKS) + + training_examples = get_training_examples() + for _ in range(3): + futures = [] + num_trainers = len(trainer_rrefs) + for idx, trainer_rref in enumerate(trainer_rrefs): + # Half the trainers will deplete inputs earlier than the rest. + trainer_has_less_inputs = ( + simulate_uneven_inputs and idx < num_trainers // 2 + ) + futures.append( + _remote_method_async( + Trainer.train_batch, + trainer_rref, + training_examples[idx], + trainer_has_less_inputs, + simulate_uneven_inputs, + ) + ) + + for future in futures: + ddp_grads, non_ddp_grads = future.wait() + # When there are uneven inputs, it is not necessary that grads + # cancel each other out, since some trainers contribute 0 grad. + if not simulate_uneven_inputs: + for grad in ddp_grads: + self.assertEqual( + grad, + torch.zeros_like(grad), + msg=f"The grad for any ddp parameter should be zeros, because " + "the training examples' grads cancel each other. Received " + f"gradient {grad}", + ) + for grad in non_ddp_grads: + self.assertNotEqual( + grad, + torch.zeros_like(grad), + msg="The grad for any non-ddp parameter shouldn't be zeros", + ) + + # Destroy process groups + for idx, trainer_rref in enumerate(trainer_rrefs): + _remote_method_async(Trainer.destroy_pg, trainer_rref).wait() + + # Send shutdown signals. + for rank in TRAINER_RANKS: + trainer = self.trainer_name(rank) + rpc.rpc_sync(trainer, set_shutdown_signal, args=()) + + rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=()) + + def _do_test(self, ddp_mode, simulate_uneven_inputs=False): + if self.rank == MASTER_RANK: + self._master_process(ddp_mode, simulate_uneven_inputs) + elif self.rank == REMOTE_WORKER_RANK: + self._remote_worker_process(ddp_mode) + elif self.rank in TRAINER_RANKS: + self._trainer_process(self.rank) + else: + raise RuntimeError(f"Unknown process rank: {self.rank}") + + @requires_gloo() + @dist_init + def test_backward_no_ddp(self): + self._do_test(DdpMode.NONE) + + @requires_gloo() + @dist_init + def test_backward_ddp_outside(self): + self._do_test(DdpMode.OUTSIDE) + + @requires_gloo() + @dist_init + def test_backward_ddp_outside_uneven_inputs(self): + self._do_test(DdpMode.OUTSIDE, simulate_uneven_inputs=True) + + @requires_gloo() + @dist_init + def test_backward_ddp_inside(self): + self._do_test(DdpMode.INSIDE) + + +# Common utils for both CPU and CUDA test suites +class CommonDdpComparisonTest(RpcAgentTestFixture): + @property + def world_size(self) -> int: + return NUM_TRAINERS + + def trainer_name(self, rank): + # The name has to be consistent with that in 'dist_init' decorator. + return f"worker{rank}" + + @staticmethod + def get_remote_grads(rref, context_id): + return dist_autograd.get_gradients(context_id)[rref.local_value().weight] + + +class DdpComparisonTest(CommonDdpComparisonTest): + def _run_test_ddp_comparision(self, simulate_uneven_inputs=False): + gLogger.info("Running trainer rank: %s", self.rank) + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. 
+ torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + # Postfix file_name with "pg" since file_name is also used by RPC agent + init_method=INIT_METHOD_TEMPLATE.format(file_name=f"{self.file_name}_pg"), + world_size=self.world_size, + rank=self.rank, + ) + net = nn.Linear(2, 3) + ddp_net = DistributedDataParallel(net) + + # Odd ranks join early if simulate_uneven_inputs. + num_inputs = 1 + if simulate_uneven_inputs: + if self.rank % 2 == 0: + num_inputs += 2 + inputs_list = [torch.rand((3, 2)) for _ in range(num_inputs)] + + if simulate_uneven_inputs: + gLogger.info("Rank %s training with %s inputs.", self.rank, len(inputs_list)) + + # Use distributed autograd. The gradients will be in RPC context map. + grads_dict = {} + with ddp_net.join(simulate_uneven_inputs): + for i, inputs in enumerate(inputs_list): + with dist_autograd.context() as context_id: + loss = ddp_net(inputs).norm() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + gLogger.info("Trainer #%s got grad dict: %s", self.rank, grads_dict) + + # Use local autograd. The gradients will be in each variable's '.grad'. + ddp_net.zero_grad() + loss = ddp_net(inputs).norm() + loss.backward() + + # The gradients should be the same + for param in net.parameters(): + self.assertTrue( + param in grads_dict, + msg=f"Param {param} is not in dist_auto grad dict {grads_dict} for iteration {i}", + ) + self.assertEqual( + grads_dict[param], + param.grad, + msg=f"The grads for param {param} are different under local " + f"and dist autograd: {param.grad} \n---\n {grads_dict[param]} for iteration {i}", + ) + dist.destroy_process_group() + + @requires_gloo() + @dist_init + def test_ddp_comparison(self): + self._run_test_ddp_comparision() + + @requires_gloo() + @dist_init + def test_ddp_comparison_uneven_inputs(self): + # test with simulating uneven inputs in DDP + self._run_test_ddp_comparision(simulate_uneven_inputs=True) + + @requires_gloo() + @dist_init + def test_ddp_dist_autograd_sparse_grads(self): + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. + torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + model = nn.EmbeddingBag(10, 3, sparse=True) + ddp_model = DistributedDataParallel(model) + + # Different inputs for each + input = torch.LongTensor(10).random_(0, 10) + offsets = torch.LongTensor([0, 4]) + + # Run local. + loss = ddp_model(input, offsets).sum() + loss.backward() + + with dist_autograd.context() as context_id: + loss = ddp_model(input, offsets).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + self.assertEqual(1, len(grads_dict)) + self.assertEqual(model.weight.grad, grads_dict[model.weight]) + + @requires_gloo() + @dist_init + def test_ddp_dist_autograd_local_vs_remote(self): + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. 
+ torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + # Use two different remote device input string, w/ and w/o the default + # device string "cpu", respectively. + for remote_device in ["worker0/cpu", "worker0"]: + remote_layer1 = RemoteModule( + remote_device=remote_device, module_cls=nn.Linear, args=(10, 5, False) + ) + layer1 = nn.Linear(10, 5, False) + # Start with the same parameters for remote and local + layer1.weight = remote_layer1.module_rref.to_here().weight + + # Run local case. + layer2 = nn.Linear(5, 1) + inputs = torch.rand((10, 10)) + ddp_model = DistributedDataParallel(layer2) + loss = ddp_model(layer1(inputs)).sum() + loss.backward() + + # Run remote case. + with dist_autograd.context() as context_id: + loss = ddp_model(remote_layer1(inputs)).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + dist.barrier() + self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight]) + self.assertEqual( + layer1.weight.grad, + rpc.rpc_sync( + "worker0", + CommonDdpComparisonTest.get_remote_grads, + args=(remote_layer1.module_rref, context_id), + ), + ) + + +class CudaDdpComparisonTest(CommonDdpComparisonTest): + @skip_if_lt_x_gpu(NUM_TRAINERS) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_ddp_dist_autograd_local_vs_remote_gpu(self): + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. + torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + remote_layer1 = RemoteModule( + remote_device="worker0/cpu", module_cls=nn.Linear, args=(10, 7, False) + ) + layer1 = nn.Linear(10, 7, False) + # Start with the same parameters for remote and local + layer1.weight = remote_layer1.module_rref.to_here().weight + + layer2 = nn.Linear(7, 5).cuda(self.rank) + ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank]) + + remote_layer3 = RemoteModule( + remote_device="worker0/cpu", module_cls=nn.Linear, args=(5, 3, False) + ) + layer3 = nn.Linear(5, 3, False) + # Start with the same parameters for remote and local + layer3.weight = remote_layer3.module_rref.to_here().weight + + layer4 = nn.Linear(3, 1).cuda(self.rank) + ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank]) + + # Run local case. + inputs = torch.rand((10, 10)) + loss = ddp_layer4( + layer3(ddp_layer2(layer1(inputs).cuda(self.rank)).cpu()).cuda(self.rank) + ).sum() + loss.backward() + + # Run remote case. 
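+ # The remote pass mirrors the local pass above but routes the CPU stages
+ # through RemoteModules on worker0: remote_layer1 (CPU) -> ddp_layer2 (GPU)
+ # -> remote_layer3 (CPU) -> ddp_layer4 (GPU). Gradients for the DDP layers are
+ # read from the dist_autograd context, while gradients for the remote layers
+ # are fetched from worker0 and compared against the local layers' .grad.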
+ with dist_autograd.context() as context_id: + loss = ddp_layer4( + remote_layer3( + ddp_layer2(remote_layer1(inputs).cuda(self.rank)).cpu() + ).cuda(self.rank) + ).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + dist.barrier() + self.assertEqual( + layer1.weight.grad, + rpc.rpc_sync( + "worker0", + CommonDdpComparisonTest.get_remote_grads, + args=(remote_layer1.module_rref, context_id), + ), + ) + self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight]) + self.assertEqual( + layer3.weight.grad, + rpc.rpc_sync( + "worker0", + CommonDdpComparisonTest.get_remote_grads, + args=(remote_layer3.module_rref, context_id), + ), + ) + self.assertEqual(layer4.weight.grad, grads_dict[layer4.weight]) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a8db76ed3338a94f19301f8aa47b683cf5cf0e9c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py @@ -0,0 +1,10108 @@ +import copy +import itertools +import math +import os +import random +import sys +import tempfile +import time +from collections import namedtuple, OrderedDict +from contextlib import contextmanager, nullcontext +from dataclasses import dataclass +from datetime import timedelta +from functools import reduce +from typing import Union, NamedTuple, Callable, Any +import unittest +import numpy as np +import torch +import torch.cuda +import torch.distributed as dist +import torch.distributed.algorithms.model_averaging.averagers as averagers +import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD +import torch.distributed.algorithms.model_averaging.utils as model_averaging_utils +import torch.nn as nn +import torch.nn.functional as F +from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR +from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT +from torch.cuda.amp import GradScaler, autocast + +from torch.distributed.algorithms.ddp_comm_hooks import ( + post_localSGD_hook as post_localSGD, + powerSGD_hook as powerSGD, + default_hooks as default, + quantization as quantization_hooks, +) +from torch.distributed.optim import _apply_optimizer_in_backward + +from torch.distributed.distributed_c10d import ( + get_world_size, + _get_default_group, + AllreduceOptions, + GroupMember, +) +from torch.distributed.utils import ( + _verify_param_shape_across_processes, + _sync_module_states, +) + +from torch.nn.parallel import DistributedDataParallel +from torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars, _MixedPrecision +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + TEST_SKIPS, + init_multigpu_helper, + initialize_temp_directories, + cleanup_temp_dir, + simple_sparse_reduce_tests, + skip_if_rocm, + skip_if_small_worldsize, + skip_if_odd_worldsize, + skip_if_lt_x_gpu, + nccl_skip_if_lt_x_gpu, + skip_if_no_gpu, + require_n_gpus_for_nccl_backend, + requires_nccl_version, + captured_output, + with_nccl_blocking_wait, + with_dist_debug_levels, + verify_ddp_error_logged, + DistTestCases, +) +from torch.testing._internal.common_utils import ( + instantiate_parametrized_tests, + IS_MACOS, + IS_WINDOWS, + FILE_SCHEMA, + IS_FBCODE, + NO_MULTIPROCESSING_SPAWN, + IS_SANDCASTLE, + 
skip_but_pass_in_sandcastle, + skip_but_pass_in_sandcastle_if, +) + +import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer + +from torch.utils.data.distributed import DistributedSampler + +try: + import torchvision + + HAS_TORCHVISION = True +except ImportError: + HAS_TORCHVISION = False + +if sys.platform == "win32": + import msvcrt +else: + import fcntl + + +class NetWithBuffers(nn.Module): + def __init__(self): + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + self.register_buffer("buffer", torch.randn(1, 2)) + + def forward(self, x): + self.buffer.add_(1) + return self.b(self.a(x)) + + +class Foo: + def __init__(self, x): + # Can be tensor or int + self.x = x + + def __eq__(self, other): + def eq(value, other): + if isinstance(value, torch.Tensor): + return torch.equal(value, other) + return value == other + + for attr, value in self.__dict__.items(): + other_value = other.__dict__[attr] + if not eq(value, other_value): + return False + return True + + +f = Foo(10) +f.bar = 1 + +foo_cpu_tensor = Foo(torch.randn(3, 3)) + + +COLLECTIVES_OBJECT_TEST_LIST = [ + {"key1": 3, "key2": 4, "key3": {"nested": True}}, + f, + foo_cpu_tensor, + "foo", + [1, 2, True, "string", [4, 5, "nested"]], +] + +# Allowlist of distributed backends where profiling collectives is supported. +PROFILING_SUPPORTED_BACKENDS = [ + dist.Backend.NCCL, + dist.Backend.GLOO, + dist.Backend.MPI, + dist.Backend.UCC, +] + +# Allowlist of distributed backends where profiling is supported with use_cuda=True +CUDA_PROFILING_SUPPORTED_BACKENDS = [ + dist.Backend.GLOO, + dist.Backend.MPI, + dist.Backend.NCCL, + dist.Backend.UCC, +] + +# Allowlist of distributed backends where profiling is supported for p2p ops +SEND_RECV_PROFILING_SUPPORTED_BACKENDS = [ + dist.Backend.MPI, + dist.Backend.GLOO, + dist.Backend.NCCL, + dist.Backend.UCC, +] + +# Dummy NamedTuple data structures to test DDP support for NamedTuple types. +EXPECTED_FIELDS = ("a", "b") +TestNamedTupleInput_0 = namedtuple("NamedTuple", EXPECTED_FIELDS) + + +class TestNamedTupleInput_1(NamedTuple): + a: torch.tensor + b: torch.tensor + + +skipIfNoTorchVision = skip_but_pass_in_sandcastle_if( + not HAS_TORCHVISION, "no torchvision" +) + +BACKEND = os.environ["BACKEND"] +INIT_METHOD = os.getenv("INIT_METHOD", "env://") + +DEFAULT_TIMEOUT = 300 +CUSTOMIZED_TIMEOUT = {"test_DistributedDataParallel": 500} + + +def get_profiling_event(event_name, profiler): + event_list = ( + profiler.events() + if isinstance(profiler, torch.profiler.profile) + else profiler.function_events + ) + return [ + event for event in event_list if ( + event.name.endswith(event_name) or event.name.startswith(event_name) + ) + ] + + +# Base error message substring on unfinished reductions. 
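+# These and the other *_str constants below are matched as message substrings,
+# so the assertions do not depend on the exact full text of the reducer errors.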
+ddp_prev_reduction_unfinished_str = ( + "Expected to have finished reduction in the prior iteration" +) +# Error message substring when find_unused_parameters=True has not been passed +ddp_recommend_find_unused_params_str = ( + "passing the keyword argument `find_unused_parameters=True`" +) +# Error message substring when find_unused_parameters=True is enabled +ddp_find_unused_params_enabled_str = "Since `find_unused_parameters=True` is enabled" +# Error message substring for possibility of not all model outputs being used +# in loss computation +ddp_outputs_not_used_in_loss_str = ( + "`forward` function outputs participate in calculating loss" +) +# Error message substring suggesting to use TORCH_DISTRIBUTED_DEBUG +ddp_suggest_debug_mode_str = ( + "set the environment variable TORCH_DISTRIBUTED_DEBUG to either INFO or DETAIL" +) + + +class DDPUnevenTestInput(NamedTuple): + name: str + model: nn.Module + inp: Union[torch.tensor, tuple] + sync_interval: int + throw_on_early_termination: bool = False + hook: Callable = None + state: Any = None + + +class _FC2(nn.Module): + def __init__(self): + super().__init__() + self.fc = nn.Linear(10, 50, bias=True) + self.fc.bias.requires_grad = False + + def forward(self, x): + x = self.fc(x) + return x + + +class Net(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(2, 10, bias=False) + self.fc2 = _FC2() + self.fc3 = nn.Linear(50, 4, bias=False) + self.relu = nn.ReLU() + self.no_grad_param = nn.Parameter( + torch.tensor([2, 2]).long(), requires_grad=False + ) + + def forward(self, x): + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return F.softmax(x, dim=1) + + +class LargeNet(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(1000, 2000, bias=False) + self.fc2 = nn.Linear(2000, 500, bias=False) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + +class Task(nn.Module): + def __init__(self): + super().__init__() + self.p = nn.Parameter(torch.ones(2, 2)) + + def forward(self, x): + return self.p + x + + +class BatchNormNet(nn.Module): + def __init__(self, affine=True): + super().__init__() + self.fc1 = nn.Linear(2, 40, bias=False) + self.bn = nn.BatchNorm1d(4, affine=affine) + self.fc2 = nn.Linear(40, 4, bias=False) + + def forward(self, x): + x = torch.reshape(self.fc1(x), (-1, 4, 10)) + x = self.bn(x) + x = torch.reshape(x, (-1, 40)) + x = self.fc2(x) + return F.softmax(x, dim=1) + + +class UnusedParamTwoLinLayerNet(nn.Module): + def __init__(self): + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 10, bias=False) + self.c = nn.Linear(5, 5, bias=False) + + def forward(self, x): + a = self.a(x) + b = self.b(x) + return (a, b) + + +class DictOutputModule(nn.Module): + def __init__(self): + super().__init__() + self.module = UnusedParamTwoLinLayerNet() + + def forward(self, x): + predictions = self.module(x) + loss = (predictions[0] + predictions[1]).sum() + return { + "predictions": predictions, + "loss": loss, + } + + +class TwoLinLayerNet(nn.Module): + def __init__(self): + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + + def forward(self, x): + a = self.a(x) + b = self.b(x) + return (a, b) + + +class EmbeddingNetDifferentParams(nn.Module): + """ + A module containing an embedding with different dimension or different # of + parameters depending on the rank. 
+ """ + + def __init__(self, rank, diff_num_params=False): + super().__init__() + embedding_dim = 500 if diff_num_params or rank == 0 else 50 + self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim) + self.lin = nn.Linear(embedding_dim, 1) + if diff_num_params: + self.lin2 = nn.Linear(1, 1, bias=False) + + def forward(self, x): + x = self.embedding(x) + return self.lin(x) + + +class ControlFlowToyModel(nn.Module): + def __init__(self): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + # Second layer is used dependent on input x. + use_second_layer = torch.equal(x, torch.ones(20, 10, device=x.device)) + if use_second_layer: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + +DDP_NET = Net() +BN_NET = BatchNormNet() +BN_NET_NO_AFFINE = BatchNormNet(affine=False) +ONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99) + + +def get_timeout(test_id): + test_name = test_id.split(".")[-1] + if test_name in CUSTOMIZED_TIMEOUT: + return CUSTOMIZED_TIMEOUT[test_name] + else: + return DEFAULT_TIMEOUT + + +default_pg_timeout = 60 + +CUSTOM_PG_TIMEOUT = { + # This test runs slowly and needs additional time to complete, otherwise can + # be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING + "test_ddp_uneven_inputs": 300, + # This test has a short timeout since it tests being taken down by + # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. + "test_ddp_model_diff_across_ranks": 5, + # This test has a short timeout since it tests being taken down by + # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. + "test_ddp_has_finalized": 5, +} + +def require_backend_is_available(backends): + def check(backend): + if backend == dist.Backend.GLOO: + return dist.is_gloo_available() + if backend == dist.Backend.NCCL: + return dist.is_nccl_available() + if backend == dist.Backend.MPI: + return dist.is_mpi_available() + if backend == dist.Backend.UCC: + return dist.is_ucc_available() + if backend in DistTestCases.backend_feature["plugin"]: + return True + return False + + if BACKEND not in backends: + return skip_but_pass_in_sandcastle( + f"Test requires backend {BACKEND} to be one of {backends}" + ) + + if not check(dist.Backend(BACKEND)): + return skip_but_pass_in_sandcastle( + f"Test requires backend {BACKEND} to be available" + ) + return lambda func: func + + +def require_world_size(world_size): + if int(os.environ["WORLD_SIZE"]) < world_size: + return skip_but_pass_in_sandcastle( + "Test requires world size of %d" % world_size + ) + return lambda func: func + + +@contextmanager +def _lock(): + TEMP_DIR = os.environ["TEMP_DIR"] + lockfile = os.path.join(TEMP_DIR, "lockfile") + with open(lockfile, "w") as lf: + try: + if sys.platform == "win32": + msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1) + yield + else: + fcntl.flock(lf.fileno(), fcntl.LOCK_EX) + yield + finally: + if sys.platform == "win32": + msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1) + else: + fcntl.flock(lf.fileno(), fcntl.LOCK_UN) + lf.close() + + +@contextmanager +def _rank_temp_file(): + if dist.get_rank() == 0: + fd, name = tempfile.mkstemp() + os.close(fd) + else: + name = None + object_list = [name] + dist.broadcast_object_list(object_list) + name = object_list[0] + try: + yield name + finally: + if dist.get_rank() == 0: + os.remove(name) + + +def _build_tensor(size, value=None, dtype=torch.float, device_id=None): + if value is None: + value = size + if device_id is None: + return 
torch.empty(size, size, size, dtype=dtype).fill_(value) + else: + return torch.empty(size, size, size, dtype=dtype).fill_(value).cuda(device_id) + + +def _build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float): + if value is None: + value = dim + return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value) + + +def _create_autograd_profiler(): + return torch.autograd.profiler.profile(record_shapes=True) + + +def _create_torch_profiler(): + return torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + ], + record_shapes=True, + ) + + +class Barrier: + barrier_id = 0 + + @classmethod + def init(cls): + cls.barrier_id = 0 + barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") + for f_name in os.listdir(barrier_dir): + os.unlink(os.path.join(barrier_dir, f_name)) + + @classmethod + def sync(cls, wait_for=None, timeout=10): + if wait_for is None: + wait_for = dist.get_world_size() + cls.barrier_id += 1 + barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") + pid = str(os.getpid()) + barrier_file = os.path.join(barrier_dir, pid) + with _lock(): + with open(barrier_file, "w") as f: + f.write(str(cls.barrier_id)) + + start_time = time.time() + while True: + arrived = 0 + with _lock(): + for f_name in os.listdir(barrier_dir): + with open(os.path.join(barrier_dir, f_name)) as f: + data = f.read() + if int(data) >= cls.barrier_id: + arrived += 1 + if arrived == wait_for: + break + + if time.time() - start_time > timeout: + raise RuntimeError("barrier timeout") + time.sleep(0.1) + + +class TestDistBackend(MultiProcessTestCase): + @classmethod + def setUpClass(cls): + os.environ["MASTER_ADDR"] = str(MASTER_ADDR) + # Not setting MASTER_PORT and get a random free port + super().setUpClass() + + def setUp(self): + super().setUp() + # initialize temp directories + initialize_temp_directories() + # initialize Barrier + Barrier.init() + # Skip return code checking for following tests as they are expected to + # crash a process due to TORCH_NCCL_ASYNC_ERROR_HANDLING. + self.skip_return_code_checks = [self.test_ddp_has_finalized.__wrapped__] + + def tearDown(self): + cleanup_temp_dir() + super().tearDown() + + @property + def init_method(self): + return f"{FILE_SCHEMA}{self.file_name}" + + @classmethod + def _run(cls, rank, test_name, file_name, pipe): + if BACKEND == "nccl" and not torch.cuda.is_available(): + sys.exit(TEST_SKIPS["no_cuda"].exit_code) + self = cls(test_name) + self.rank = rank + self.file_name = file_name + + if torch.cuda.is_available() and torch.cuda.device_count() < int( + self.world_size + ): + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + try: + pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout) + timeout = timedelta(seconds=pg_timeout_seconds) + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=int(self.world_size), + rank=self.rank, + timeout=timeout, + ) + except RuntimeError as e: + if "recompile" in e.args[0]: + sys.exit(TEST_SKIPS["backend_unavailable"].exit_code) + + raise + + # Execute barrier prior to running test to ensure that every process + # has finished initialization and that the following test + # immediately exiting due to a skip doesn't cause flakiness. + self._barrier() + + self.run_test(test_name, pipe) + self._barrier() + dist.destroy_process_group() + sys.exit(0) + + # Needed since MultiProcessTestCase assumes a world_size of 4, but we + # run these tests under other various world_sizes. 
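+ # The value is read directly from the WORLD_SIZE environment variable and is
+ # therefore a string; call sites that need an integer (e.g. _run above) cast it.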
+ @property + def world_size(self): + return os.environ["WORLD_SIZE"] + + +class DistributedTest: + class _DistTestBase: + def _barrier(self, *args, **kwargs): + Barrier.sync(*args, **kwargs) + + def _init_group_test(self, **kwargs): + group = [1, 2] + group_id = dist.new_group(group, **kwargs) + rank = dist.get_rank() + if rank not in group: + return ([], None, rank) + + return (group, group_id, rank) + + def _init_full_group_test(self, **kwargs): + group = list(range(0, dist.get_world_size())) + group_id = dist.new_group(**kwargs) + rank = dist.get_rank() + return (group, group_id, rank) + + def _init_global_test(self): + group = list(range(0, dist.get_world_size())) + group_id = dist.group.WORLD + rank = dist.get_rank() + return (group, group_id, rank) + + def _verify_buffers_equal(self, m1, m2): + # verify buffers across models + m1_buf_dict = dict(m1.module.named_buffers()) + for name, buf in m2.module.named_buffers(): + self.assertEqual(buf, m1_buf_dict[name]) + + # Verify buffers across ranks. + m1_buffers = list(m1.buffers()) + m2_buffers = list(m2.buffers()) + for (buf1, buf2) in zip(m1_buffers, m2_buffers): + gathered_bufs = [ + torch.empty_like(buf1) for _ in range(dist.get_world_size()) + ] + dist.all_gather(gathered_bufs, buf1) + gathered_bufs_m2 = [ + torch.empty_like(buf2) for _ in range(dist.get_world_size()) + ] + for b in gathered_bufs: + self.assertEqual(b, buf1) + dist.all_gather(gathered_bufs_m2, buf2) + for b in gathered_bufs_m2: + self.assertEqual(b, buf2) + + def test_dump_DDP_relevant_env_vars(self): + with captured_output() as (out, _): + _dump_DDP_relevant_env_vars() + lines = out.getvalue().splitlines() + + def format_line(var): + return f"env:{var}={os.environ[var] if var in os.environ else 'N/A'}" + + # Check relevant env vars + vars = [ + "MASTER_ADDR", + "MASTER_PORT", + "WORLD_SIZE", + "NCCL_TOPO_DUMP_FILE", # N/A + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + ] + for var in vars: + line = format_line(var) + self.assertIn(line, lines) + # Check irrelevant env vars + vars = [ + "xxx", + "yyy", + "zzz", + ] + for var in vars: + line = format_line(var) + self.assertNotIn(line, lines) + + # GET RANK + def test_get_rank(self): + test_dir = os.path.join(os.environ["TEMP_DIR"], "test_dir") + pid = str(os.getpid()) + num_processes = dist.get_world_size() + with open(os.path.join(test_dir, pid), "w") as f: + f.write(str(dist.get_rank())) + + self._barrier() + + all_ranks = set() + for f_name in os.listdir(test_dir): + with open(os.path.join(test_dir, f_name)) as f: + all_ranks.add(int(f.read())) + self.assertEqual(len(all_ranks), num_processes) + + self._barrier() + + if dist.get_rank() == 0: + for f_name in os.listdir(test_dir): + os.unlink(os.path.join(test_dir, f_name)) + + self._barrier() + + def test_get_backend(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + backend_str = BACKEND.lower() + self.assertEqual(dist.get_backend(), backend_str) + if dist.get_rank() in group: + self.assertEqual(dist.get_backend(group_id), backend_str) + else: + with self.assertRaisesRegex( + ValueError, "Invalid process group specified" + ): + dist.get_backend(group_id) + + def test_Backend_enum_class(self): + # test parsing + backend = BACKEND.lower() + self.assertEqual(dist.Backend(BACKEND.upper()), backend) + self.assertEqual(dist.Backend(BACKEND), backend) + with self.assertRaises(ValueError): + dist.Backend(None) + with self.assertRaises(ValueError): + dist.Backend(3) + with self.assertRaises(ValueError): + 
dist.Backend(["gloo"]) + + # Test destroy + def test_destroy_group(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + self._barrier() + dist.destroy_process_group(group_id) + + # Test get rank and size of group + def test_get_rank_size_group(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + if dist.get_rank() in group: + self.assertEqual(dist.get_world_size(group_id), 2) + self.assertTrue(dist.get_rank(group_id) in list(range(2))) + else: + self.assertEqual(dist.get_world_size(group_id), -1) + self.assertEqual(dist.get_rank(group_id), -1) + + # Test destroy full groups + def test_destroy_full_group(self): + _, group_id, _ = self._init_full_group_test() + self._barrier() + dist.destroy_process_group(group_id) + + # Test get rank and size of full group + def test_get_rank_size_full_group(self): + _, group_id, _ = self._init_full_group_test() + self.assertEqual(dist.get_world_size(group_id), dist.get_world_size()) + self.assertEqual(dist.get_rank(group_id), dist.get_rank()) + + def _test_barrier_timeout(self, group_id, timeout): + local_rank = dist.get_rank(group_id) + + # Only execute barrier on rank == 0, causing it to timeout + if local_rank == 0: + expected_time = time.time() + timeout.total_seconds() + # In debug mode, we execute a monitored_barrier before the + # collective, so assert on that. + if dist.get_debug_level() == dist.DebugLevel.DETAIL: + exception_ctx = self.assertRaisesRegex( + Exception, "failed to pass monitoredBarrier" + ) + else: + exception_ctx = self.assertRaisesRegex( + Exception, " (Timed out|closed|timeout) " + ) + with exception_ctx: + dist.barrier(group_id) + self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1) + else: + pass + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + @skip_but_pass_in_sandcastle_if( + not INIT_METHOD.startswith("file://"), + "Requires file:// initialization method. " + + "Both tcp:// and env:// rely on the TCP store for which " + "reinitialization has proven racy.", + ) + def test_barrier_timeout_global(self): + dist.destroy_process_group() + + # Explicitly pass world size to the barrier because we've + # just destroyed any state in torch.distributed. + self._barrier(wait_for=int(os.environ["WORLD_SIZE"])) + + # Reinitialize global process group + timeout = timedelta(seconds=1) + dist.init_process_group( + init_method=INIT_METHOD, + backend=BACKEND, + world_size=int(os.environ["WORLD_SIZE"]), + rank=self.rank, + timeout=timeout, + ) + self._test_barrier_timeout(dist.group.WORLD, timeout) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + def test_barrier_timeout_group(self): + timeout = timedelta(seconds=5) + _, group_id, _ = self._init_group_test(timeout=timeout) + if group_id is not None: + self._test_barrier_timeout(group_id, timeout) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + def test_barrier_timeout_full_group(self): + timeout = timedelta(seconds=1) + _, group_id, _ = self._init_full_group_test(timeout=timeout) + if group_id is not None: + self._test_barrier_timeout(group_id, timeout) + + # This test helper can only be used when using the Gloo or NCCL backend + # **and** both the Gloo and NCCL backends are available. + # See the @skip annotations below. 
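+ # It builds a subgroup on the *other* backend (nccl when the suite runs on
+ # gloo; gloo when it runs on nccl or a plugin backend), then checks the
+ # subgroup's backend name, its rank/size bookkeeping, and that broadcasting a
+ # CUDA tensor over the subgroup produces the expected value on every rank.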
+ def _test_group_override_backend(self, initializer): + if BACKEND == "gloo": + new_backend = "nccl" + elif BACKEND == "nccl": + new_backend = "gloo" + elif BACKEND in DistTestCases.backend_feature["plugin"]: + new_backend = "gloo" + + group, group_id, rank = initializer(backend=new_backend) + if group_id is None: + return + + if new_backend == "gloo": + self.assertTrue(group_id._get_backend_name(), "gloo") + if new_backend == "nccl": + self.assertTrue(group_id._get_backend_name(), "nccl") + + self.assertEqual(rank, group[dist.get_rank(group_id)]) + self.assertEqual(len(group), dist.get_world_size(group_id)) + + # Pin device (so we avoid NCCL race conditions/deadlocks). + group_rank = dist.get_rank(group_id) + torch.cuda.set_device(group_rank) + + # Run broadcast of CUDA tensor (so it works for both Gloo and NCCL). + tensor = _build_tensor(2, value=group_rank).cuda() + dist.broadcast(tensor, src=group[0], group=group_id) + self.assertEqual(_build_tensor(2, value=0), tensor.to("cpu")) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_world_size(3) + @skip_if_lt_x_gpu(2) + def test_backend_group(self): + self._test_group_override_backend(self._init_group_test) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @unittest.skipIf(BACKEND == "ucc", "broken, see https://github.com/pytorch/pytorch/pull/113620") + def test_backend_full_group(self): + self._test_group_override_backend(self._init_full_group_test) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(2) + def test_new_subgroups(self): + subgroup_size = 2 + cur_subgroup, subgroups = dist.new_subgroups(subgroup_size) + + world_size = dist.get_world_size() + self.assertEqual(cur_subgroup.size(), subgroup_size) + self.assertEqual(len(subgroups), world_size / subgroup_size) + self.assertFalse(dist._rank_not_in_group(cur_subgroup)) + + for subgroup in subgroups: + dist.destroy_process_group(subgroup) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_no_gpu + def test_new_subgroups_group_size_exceeds_world_size(self): + with self.assertRaisesRegex(ValueError, "must not exceed"): + dist.new_subgroups(100) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_world_size_not_divisible_by_group_size(self): + with self.assertRaisesRegex( + ValueError, "The world size must be divisible by 'group_size'" + ): + dist.new_subgroups(3) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_by_enumeration(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + cur_subgroup, subgroups = dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0, 2], [1, 3]] + ) + if device_id >= 4: + self.assertIsNone(cur_subgroup) + else: + self.assertEqual(cur_subgroup.size(), 2) 
+ self.assertEqual(len(subgroups), 2) + if device_id == 0 or device_id == 2: + self.assertEqual(cur_subgroup, subgroups[0]) + else: + self.assertEqual(cur_subgroup, subgroups[1]) + + for subgroup in subgroups: + dist.destroy_process_group(subgroup) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + world_size = get_world_size(group_id) + + with self.assertRaisesRegex( + RuntimeError, + "The new group's rank should be within the world_size set by init_process_group", + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0, 1], [world_size, 2]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_no_gpu + def test_new_subgroups_by_enumeration_negative_input_rank(self): + group, group_id, rank = self._init_global_test() + + with self.assertRaisesRegex( + ValueError, + "The new group's rank should be within the world_size set by init_process_group", + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[-1, -2], [-3, -4]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_overlap_not_allowed(self): + with self.assertRaisesRegex( + ValueError, "Rank 1 has appeared in both subgroup" + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0], [1, 2], [1, 3]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_average_parameters(self): + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Sequential( + nn.Conv2d(3, 3, kernel_size=3, padding=1), + nn.ReLU(), + nn.Linear(1, 5, bias=False), + ).cuda(device_id) + # Test global model averaging + for p in model.parameters(): + p.data = torch.ones_like(p.data) + model_averaging_utils.average_parameters( + params=model.parameters(), process_group=None + ) + # Every element will be the same as the input. + for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data)) + + # Test partial model averaging + for p in model.parameters(): + p.data = torch.ones_like(p.data) * rank + group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") + model_averaging_utils.average_parameters( + params=model.parameters(), process_group=group_nccl + ) + if not dist._rank_not_in_group(group_nccl): + # Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5. + for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data) * 0.5) + else: + # Every element on device not in the subgroup should remain the same. 
+ for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data) * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_periodic_model_averager(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + expected_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + self.assertEqual(param.data, expected_avg_tensor) + else: + # No model averaging, so the parameters are not updated. + self.assertEqual(param.data, tensor) + + @skip_if_lt_x_gpu(2) + def test_periodic_model_averager_param_group(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + opt = torch.optim.SGD(model.parameters(), lr=0.1) + + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + for param_group in opt.param_groups: + for params in param_group["params"]: + # mock grad + params.grad = torch.ones_like(param.data) * rank + params.data = torch.ones_like(param.data) * rank + averager.average_parameters(opt.param_groups) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + for param_group in opt.param_groups: + for params in param_group["params"]: + if params.grad is None: + continue + self.assertEqual( + param.data, + torch.ones_like(param.data) + * sum(range(world_size)) + / world_size, + ) + else: + # No model averaging, so the parameters are not updated. 
+ for param_group in opt.param_groups:
+ for params in param_group["params"]:
+ if params.grad is None:
+ continue
+ self.assertEqual(
+ param.data, torch.ones_like(param.data) * rank
+ )
+
+ @skip_but_pass_in_sandcastle_if(
+ BACKEND not in DistTestCases.backend_feature["subgroup"],
+ f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
+ )
+ @skip_if_lt_x_gpu(2)
+ def test_1_level_hierarchical_model_averager_equivalent_to_periodic_model_averager(
+ self,
+ ):
+ rank = dist.get_rank()
+ world_size = dist.get_world_size()
+ rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
+ device_id = rank_to_GPU[rank][0]
+
+ model = nn.Linear(1, 5, bias=False).cuda(device_id)
+ param = next(model.parameters())
+ tensor = torch.ones_like(param.data) * rank
+ expected_avg_tensor = (
+ torch.ones_like(param.data) * sum(range(world_size)) / world_size
+ )
+ period = 4
+ for warmup_steps in [12, 13, 14, 15]:
+ averager = hierarchicalSGD.HierarchicalModelAverager(
+ # Run the global averaging at a period of 4,
+ # which is equivalent to the above periodic model averaging test case.
+ period_group_size_dict=OrderedDict([(period, world_size)]),
+ warmup_steps=warmup_steps,
+ )
+
+ averager = averagers.PeriodicModelAverager(
+ period=period, warmup_steps=warmup_steps
+ )
+ for step in range(0, 20):
+ # Reset the parameters at every step.
+ param.data = copy.deepcopy(tensor)
+ for params in model.parameters():
+ # mock grad
+ params.grad = torch.ones_like(param.data)
+ averager.average_parameters(model.parameters())
+ if step >= warmup_steps and (step - warmup_steps) % period == 0:
+ self.assertEqual(param.data, expected_avg_tensor)
+ else:
+ # No model averaging, so the parameters are not updated.
+ self.assertEqual(param.data, tensor)
+
+ @skip_but_pass_in_sandcastle_if(
+ BACKEND not in DistTestCases.backend_feature["subgroup"],
+ f"The {BACKEND} backend does not support creating subgroups on CUDA devices",
+ )
+ @require_world_size(4)
+ @skip_if_lt_x_gpu(4)
+ def test_3_level_hierarchical_model_averager(self):
+ rank = dist.get_rank()
+ world_size = dist.get_world_size()
+ rank_to_GPU = init_multigpu_helper(world_size, BACKEND)
+ device_id = rank_to_GPU[rank][0]
+
+ model = nn.Linear(1, 5, bias=False).cuda(device_id)
+ param = next(model.parameters())
+ tensor = torch.ones_like(param.data) * rank
+ # Set up hierarchical model averaging as follows:
+ # after the first 10 warmup steps,
+ # run model averaging every 2 steps within each subgroup of size 2,
+ # run model averaging every 4 steps within each subgroup of size 4,
+ # and run the global model averaging every 8 steps.
+ # If there is a conflict in model averaging at a step, only run the highest-level model averaging.
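+ # With warmup_steps=10 and periods (2, 4, 8), the assertions below expect:
+ # steps 16 and 24 -> global average across all ranks;
+ # steps 12 and 20 -> average within the size-4 subgroup;
+ # steps 10, 14, 18 and 22 -> average within the size-2 subgroup;
+ # every other step -> parameters left unchanged.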
+ warmup_steps = 10 + subgroup_size1 = 2 + subgroup_avg_period1 = 2 + subgroup_size2 = 4 + subgroup_avg_period2 = 4 + global_avg_period = 8 + period_group_size_dict = OrderedDict( + [ + (subgroup_avg_period1, subgroup_size1), + (subgroup_avg_period2, subgroup_size2), + (global_avg_period, world_size), + ] + ) + averager = hierarchicalSGD.HierarchicalModelAverager( + period_group_size_dict=period_group_size_dict, warmup_steps=warmup_steps + ) + subgroup1 = averager.period_process_group_dict[subgroup_avg_period1] + subgroup2 = averager.period_process_group_dict[subgroup_avg_period2] + + real_group_ranks_res1 = dist.get_process_group_ranks(subgroup1) + real_group_ranks_res2 = dist.get_process_group_ranks(subgroup2) + expect_group_ranks_res1 = ( + rank // subgroup_size1 * subgroup_size1 + + np.array(list(range(subgroup_size1))) + ).tolist() + expect_group_ranks_res2 = ( + rank // subgroup_size2 * subgroup_size2 + + np.array(list(range(subgroup_size2))) + ).tolist() + self.assertEqual(real_group_ranks_res1, expect_group_ranks_res1) + self.assertEqual(real_group_ranks_res2, expect_group_ranks_res2) + + expected_avg_tensor_within_subgroup1 = ( + torch.ones_like(param.data) + * sum(real_group_ranks_res1) + / subgroup_size1 + ) + expected_avg_tensor_within_subgroup2 = ( + torch.ones_like(param.data) + * sum(real_group_ranks_res2) + / subgroup_size2 + ) + expected_global_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + for step in range(0, 25): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step == 16 or step == 24: + # Run global model averaging when `step` can be divided by 8. + self.assertEqual(param.data, expected_global_avg_tensor) + elif step == 12 or step == 20: + # Run model averaging within subgroup when `step` can be divided by 4 but not by 8. + self.assertEqual(param.data, expected_avg_tensor_within_subgroup2) + elif step == 10 or step == 14 or step == 18 or step == 22: + # Run model averaging within subgroup when `step` can be divided by 2 but not by 4 or 8. + self.assertEqual(param.data, expected_avg_tensor_within_subgroup1) + else: + # No model averaging, so the parameters are not updated. 
+ self.assertEqual(param.data, tensor) + + # Coalescing manager (sync mode) + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, + "Coalescing manager currently tests with NCCL only; internal test flaky" + ) + def test_coalescing_manager(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + num_colls = 2 + size_per_coll = 8 + small_tensors = [ + torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) + ] + + with dist._coalescing_manager(): + for i in range(num_colls): + dist.all_reduce(small_tensors[i]) + + big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) + dist.all_reduce(big_tensor) + + for i in range(num_colls): + self.assertEqual( + small_tensors[i], + big_tensor[i * size_per_coll : (i + 1) * size_per_coll] + ) + + self._barrier() + + # Coalescing manager (async mode) + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, + "Coalescing manager currently tests with NCCL only; internal test flaky" + ) + def test_coalescing_manager_async(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + num_colls = 2 + size_per_coll = 8 + small_tensors = [ + torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) + ] + + with dist._coalescing_manager(async_ops=True) as cm: + for i in range(num_colls): + dist.all_reduce(small_tensors[i]) + cm.wait() + + big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) + dist.all_reduce(big_tensor) + + for i in range(num_colls): + self.assertEqual( + small_tensors[i], + big_tensor[i * size_per_coll : (i + 1) * size_per_coll] + ) + + self._barrier() + + # NCCL Batch SEND RECV + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_nccl(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + recv_tensors = [None for _ in range(world_size)] + expected_tensors = [None for _ in range(world_size)] + + for val in ["1", "0"]: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = val + for src in range(0, world_size): + send_tensor = _build_tensor(rank + 1, device_id=device_id).fill_( + src + ) + recv_tensors[src] = _build_tensor( + src + 1, value=-1, device_id=device_id + ).fill_(-1) + expected_tensors[src] = _build_tensor( + src + 1, value=-1, device_id=device_id + ).fill_(rank) + recv_op = dist.P2POp(dist.irecv, recv_tensors[src], src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + for src in range(0, world_size): + self.assertEqual(recv_tensors[src], expected_tensors[src]) + + self._barrier() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_ring_exchange_nccl(self): + self._barrier() + rank = 
dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + + send_tensor = _build_tensor(world_size, device_id=device_id) + recv_tensor = _build_tensor(world_size, value=-1, device_id=device_id) + send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1) % world_size) + recv_op = dist.P2POp( + dist.irecv, recv_tensor, (rank - 1 + world_size) % world_size + ) + reqs = dist.batch_isend_irecv([send_op, recv_op]) + for req in reqs: + req.wait() + + self._barrier() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_self_nccl(self): + self._barrier() + # Ensure the process group has been fully initialized (needed by + # the first sub-group batch_isend_irecv call) + dist.barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + p2p_op_list = [] + + if rank == 0: + send_tensor = _build_tensor(rank + 1, device_id=device_id) + recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id) + recv_op = dist.P2POp(dist.irecv, recv_tensor, 0) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, 0) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + @skip_if_no_gpu + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_no_rank_zero_nccl(self): + self._barrier() + # Ensure the process group has been fully initialized (needed by + # the first sub-group batch_isend_irecv call) + dist.barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + + if rank == 1: + peer = 2 + elif rank == 2: + peer = 1 + + if rank in [1, 2]: + send_tensor = _build_tensor(rank + 1, device_id=device_id) + recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id) + recv_op = dist.P2POp(dist.irecv, recv_tensor, peer) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, peer) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # GLOO Batch SEND RECV CPU + @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") + def test_batch_isend_irecv_gloo(self): + self._barrier() + rank = dist.get_rank() + p2p_op_list = [] + + for src in range(0, dist.get_world_size()): + if src == rank: + continue + send_tensor = _build_tensor(rank + 1) + recv_tensor = _build_tensor(src + 1, value=-1) + recv_op = dist.P2POp(dist.irecv, recv_tensor, src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # GLOO Batch SEND RECV CPU with provided tags + @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") + def test_batch_isend_irecv_gloo_tags(self): + self._barrier() + rank = dist.get_rank() + p2p_op_list = [] + + for src in range(0, dist.get_world_size()): + if src == rank: + 
continue + send_tensor = _build_tensor(rank + 1) + recv_tensor = _build_tensor(src + 1, value=-1) + recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # NCCL Batch SEND RECV Op Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_op_err(self): + self._barrier() + rank = dist.get_rank() + if rank == 0: + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + with self.assertRaisesRegex(ValueError, "^Invalid ``op``"): + send_tensor = _build_tensor(rank + 1, device_id=device_id) + send_op = dist.P2POp(dist.broadcast, send_tensor, 1) + dist.batch_isend_irecv([send_op]) + + # NCCL Batch SEND RECV p2p_op_list Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_op_list_err(self): + self._barrier() + rank = dist.get_rank() + if rank == 0: + with self.assertRaisesRegex(ValueError, "^Invalid ``p2p_op_list``"): + dist.batch_isend_irecv([1, 2]) + + # NCCL Batch SEND RECV Mixed Backend Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_mixed_backend_err(self): + self._barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + group_gloo = dist.new_group(ranks=[0, 1], backend="gloo") + group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") + if rank == 0: + with self.assertRaisesRegex( + ValueError, "All ops need to use the same group" + ): + send_tensor = _build_tensor(rank + 1) + send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo) + send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl) + dist.batch_isend_irecv([send_op_gloo, send_op_nccl]) + + # NCCL SEND RECV + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def _test_send_recv_nccl(self, profiler_ctx=None): + # TODO: now that nccl send/recv is supported, there does not seem to + # be a need to have nccl send/recv be tested separately. 
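+ # Exchange pattern: on iteration `src`, rank `src` sends a cubic tensor of
+ # side (src + 1) filled with the value (src + 1) to every other rank; each
+ # receiver reads it into a -1-filled buffer and checks the received values.
+ # e.g. with world_size == 2: rank 0 sends a 1x1x1 tensor of 1s to rank 1,
+ # and rank 1 sends a 2x2x2 tensor of 2s to rank 0.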
+ rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + + tensor = _build_tensor(rank + 1, device_id=device_id) + profiler_cls = profiler_ctx if profiler_ctx is not None else nullcontext() + with profiler_cls as prof: + for src in range(0, world_size): + if src == rank: + # Send mode + for dst in range(0, world_size): + if dst == rank: + continue + dist.send(tensor, dst) + else: + # Recv mode + expected_tensor = _build_tensor(src + 1) + output_tensor = _build_tensor( + src + 1, value=-1, device_id=device_id + ) + dist.recv(output_tensor, src) + self.assertEqual(output_tensor, expected_tensor) + + self._barrier() + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof) + self.assertTrue(events) + # Event order is not deterministic, so simply assert their shape + # is found in the following list. + expected_shapes = [ + [[rank + 1] * 3] for rank in range(dist.get_world_size()) + ] + for event in events: + self.assertTrue(event.input_shapes in expected_shapes) + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_send_recv_nccl(self): + self._test_send_recv_nccl() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_send_recv_nccl_autograd_profiler(self): + profiler_ctx = torch.autograd.profiler.profile(record_shapes=True) + self._test_send_recv_nccl(profiler_ctx) + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_nccl_torch_profiler(self): + profiler_ctx = torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ], + record_shapes=True, + ) + self._test_send_recv_nccl(profiler_ctx) + + # SEND RECV + def _test_send_recv(self, profiler_ctx): + rank = dist.get_rank() + send_size = rank + 1 + tensor = _build_tensor(send_size) + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for src in range(0, dist.get_world_size()): + if src == rank: + # Send mode + for dst in range(0, dist.get_world_size()): + if dst == rank: + continue + dist.send(tensor, dst) + else: + # Recv mode + recv_size = src + 1 + expected_tensor = _build_tensor(recv_size) + output_tensor = _build_tensor(recv_size, value=-1) + dist.recv(output_tensor, src) + self.assertEqual(output_tensor, expected_tensor) + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from all other ranks. 
+ event_count = sum(e.count for e in events) + expected_event_count = dist.get_world_size() - 1 + self.assertEqual(event_count, expected_event_count) + # Event order is not deterministic, so simply assert their shape + # is found in the following list. + expected_shapes = [ + [[rank + 1] * 3] for rank in range(dist.get_world_size()) + ] + for event in events: + self.assertTrue(event.is_async) + self.assertTrue(event.input_shapes in expected_shapes) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl send/recv tested by test_send_recv_nccl" + ) + def test_send_recv(self): + self._test_send_recv(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_send_recv(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv(profiler_ctx=torch_profiler_ctx) + + # SEND RECV ANY SOURCE + def _test_send_recv_any_source(self, profiler_ctx): + rank = dist.get_rank() + send_recv_size = 10 + tensor = _build_tensor(send_recv_size, value=rank) + recv_ranks = list() + irecv_ranks = list() + + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for dst in range(0, dist.get_world_size()): + if dst == rank: + # Recv mode + for dst in range(0, dist.get_world_size()): + if dst == rank: + continue + + for recv in ["recv", "irecv"]: + output_tensor = _build_tensor(send_recv_size, value=-1) + + if recv == "recv": + sender = dist.recv(output_tensor) + recv_ranks.append(sender) + elif recv == "irecv": + work = dist.irecv(output_tensor) + work.wait() + sender = work._source_rank() + irecv_ranks.append(sender) + + # Assert the scalar value "sender" that should be + # equal to the rank of the sender is equal to all + # values in the received tensor. + self.assertTrue(output_tensor.eq(sender).all()) + else: + # Send mode + dist.send(tensor, dst) # recv + dist.send(tensor, dst) # irecv + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recvAnySource"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from other rank twice. + self.assertEqual( + sum(event.count for event in events), + 2 * (dist.get_world_size() - 1), + ) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) + + # Each rank would have 2 * (world_size - 1) sends, verify that + # globally we receive the same amount on the other end. 
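+ # Each rank all-gathers the sender ranks it observed via recv/irecv; the
+ # flattened result is grouped by sender, and every rank must appear exactly
+ # 2 * (world_size - 1) times (once per recv and once per irecv from each peer).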
+ recv_ranks_tensor = torch.cat( + (torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0 + ) + global_recv_ranks = [ + torch.empty_like(recv_ranks_tensor) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(global_recv_ranks, recv_ranks_tensor) + global_recv_ranks_list = [] + for tensor in global_recv_ranks: + global_recv_ranks_list += tensor.tolist() + + from itertools import groupby + + global_recv_ranks_list.sort() + frequency = [ + len(list(group)) for key, group in groupby(global_recv_ranks_list) + ] + self.assertEqual(dist.get_world_size(), len(frequency)) + self.assertEqual( + [2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + def test_send_recv_any_source(self): + self._test_send_recv_any_source(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + def test_send_recv_any_source_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_any_source_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx) + + # SEND RECV WITH TAG + def _test_send_recv_with_tag(self, profiler_ctx): + rank = dist.get_rank() + world_size = dist.get_world_size() + send_recv_size = 10 + tensor = _build_tensor(send_recv_size, value=rank) + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for dst in range(0, world_size): + if dst == rank: + # Recv mode + for src in range(0, world_size): + if src == rank: + continue + output_tensor = _build_tensor(send_recv_size, value=-1) + dist.recv(output_tensor, src, tag=src) + self.assertTrue(output_tensor.eq(src).all()) + else: + # Send mode + dist.send(tensor, dst, tag=rank) + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from all other ranks + event_count = sum(e.count for e in events) + expected_event_count = dist.get_world_size() - 1 + self.assertEqual(event_count, expected_event_count) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, event_name) + self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_with_tag(self): + self._test_send_recv_with_tag(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_with_tag_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + return 
self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_with_tag_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx) + + # ISEND + def _test_isend(self, profiler_ctx): + rank = dist.get_rank() + world_size = dist.get_world_size() + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + if rank == 0: + requests = [ + dist.isend(_build_tensor(dest, 10), dest) + for dest in range(1, world_size) + ] + for request in requests: + request.wait() + self.assertTrue(request.is_completed()) + else: + tensor = _build_tensor(rank, -1) + dist.recv(tensor, 0) + self.assertEqual(tensor, _build_tensor(rank, 10)) + + self._barrier() + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + expected_event_name = ( + f"{backend}:send" if rank == 0 else f"{backend}:recv" + ) + events = get_profiling_event(expected_event_name, prof) + event_count = sum(e.count for e in events) + expected_count = dist.get_world_size() - 1 if rank == 0 else 1 + self.assertEqual(expected_count, event_count) + # Event ordering is not guaranteed, so simply ensure the shapes are + # found in the following map. + expected_shapes = { + r: [[r] * 3] for r in range(1, dist.get_world_size()) + } + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, expected_event_name) + if rank == 0: + self.assertTrue( + event.input_shapes in expected_shapes.values() + ) + else: + self.assertEqual(event.input_shapes, expected_shapes[rank]) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + def test_isend(self): + self._test_isend(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + def test_isend_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_isend(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_isend_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + self._test_isend(profiler_ctx=torch_profiler_ctx) + + # IRECV + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support irecv" + ) + def test_irecv(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + + if rank == 0: + expected_tensors = [ + _build_tensor(src, -1) for src in range(1, world_size) + ] + requests = [ + dist.irecv(expected_tensors[src - 1], src) + for src in range(1, world_size) + ] + + for src in range(1, world_size): + requests[src - 1].wait() + self.assertTrue(requests[src - 1].is_completed()) + self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10)) + else: + tensor = _build_tensor(rank, 10) + dist.send(tensor, 0) + + self._barrier() + + # BROADCAST + def 
_test_broadcast_helper( + self, + group, + group_id, + rank, + cuda=False, + rank_to_GPU=None, + with_options=False, + ): + for dtype, value, requires_cuda in [ + (torch.float, -1e-10, False), + (torch.double, -1e-100, False), + (torch.half, -0.1, True), + (torch.int8, -2, False), + (torch.uint8, 129, False), + (torch.int, -1e5, False), + (torch.long, -1e15, False), + ]: + if requires_cuda and not cuda: + continue + for src in group: + expected_tensor = _build_tensor(src + 1, value, dtype) + if cuda: + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + if rank == src: + if with_options: + opts = dist.BroadcastOptions() + opts.rootTensor = 0 + opts.rootRank = src + self.call_dist_op( + ":broadcast", + True, + group_id.broadcast, + [expected_tensor], + opts, + ) + else: + self.call_dist_op( + ":broadcast", + False, + dist.broadcast, + expected_tensor, + src, + group_id, + ) + else: + tensor = _build_tensor(src + 1, -1, dtype) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + if with_options: + opts = dist.BroadcastOptions() + opts.rootTensor = 0 + opts.rootRank = src + self.call_dist_op( + ":broadcast", True, group_id.broadcast, [tensor], opts + ) + else: + self.call_dist_op( + ":broadcast", + False, + dist.broadcast, + tensor, + src, + group_id, + ) + self.assertEqual(tensor.size(), expected_tensor.size()) + self.assertEqual( + tensor.ne(expected_tensor).max(), torch.tensor(False) + ) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast(self): + group, group_id, rank = self._init_global_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and Nccl backend supports CUDA allReduce", + ) + @skip_if_no_gpu + def test_broadcast_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast_group(self): + group, group_id, rank = self._init_group_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", + "Only NCCL backend supports high priority stream", + ) + @skip_if_no_gpu + def test_nccl_high_priority_stream(self): + group, _, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + + new_port = str(MASTER_PORT + 1) + os.environ["MASTER_PORT"] = new_port + gen_iterator = dist.rendezvous("env://", rank, dist.get_world_size()) + store, rank, size = next(gen_iterator) + store = dist.PrefixStore(new_port, store) + + opts = dist.ProcessGroupNCCL.Options() + opts.is_high_priority_stream = False + group_id = dist.ProcessGroupNCCL(store, rank, size, opts) + + self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True) + + # REDUCE + def _test_reduce_helper( + self, + group, + group_id, + rank, + op, + master_value, + 
worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + ): + for src in group: + tensor = _build_tensor(src + 1).fill_( + master_value if rank == src else worker_value + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + self.call_dist_op( + ":reduce", + False, + dist.reduce, + tensor, + src, + op, + group_id, + tensor_shapes=[tensor.shape], + ) + if rank == src: + self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_sum(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_sum_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + 10 * (len(group) - 1), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_product(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_min(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_max(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def 
test_reduce_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + # REDUCE TWICE + def _test_reduce_twice_helper( + self, + group, + group_id, + rank, + op, + master_value, + worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + ): + for src in group: + tensors = [ + _build_tensor(src + 1).fill_( + master_value if rank == src else worker_value + ) + for i in range(2) + ] + if cuda: + for i in range(2): + tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0]) + self.call_dist_op( + ":reduce", + False, + dist.reduce, + tensors[0], + src, + op, + group_id, + secondary_op_call=lambda: dist.reduce( + tensors[1], src, op, group_id + ), + tensor_shapes=[tensors[0].shape], + ) + if rank == src: + for tensor in tensors: + 
self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_sum_twice(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_twice_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_sum_cuda_twice(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_reduce_twice_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + 10 * (len(group) - 1), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports reduce_scatter_v" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_scatter_v_cuda(self): + self._barrier() + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + input_split_sizes = [] + for src in group: + input_split_sizes.append(src + 1) + start_len = sum(input_split_sizes[:rank]) + end_len = start_len + input_split_sizes[rank] + sum_len = sum(input_split_sizes) + master_value = 2 + worker_value = 10 + + for async_val in [True, False]: + tensor = _build_tensor(sum_len, worker_value, device_id=device_id) + tensor[start_len:end_len].fill_(master_value) + out_tensor = ( + torch.empty( + input_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + .fill_(-1) + .cuda(device_id) + ) + + req = dist.reduce_scatter( + out_tensor, + list(torch.split(tensor, input_split_sizes)), + dist.ReduceOp.SUM, + group_id, + async_val, + ) + if async_val: + req.wait() + + expected_value = 2 + (10 * (len(group) - 1)) + expected_tensor = torch.empty( + input_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + expected_tensor = expected_tensor.fill_(expected_value).cuda(device_id) + + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + # Test reduce_scatter_tensor accepting single tensor as input + def _reduce_scatter_tensor_helper( + self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None + ): + if cuda: + tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) + tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) + tensor_shapes = [tensor_out.shape] + self.call_dist_op( + ":reduce_scatter_tensor", + False, + dist.reduce_scatter_tensor, + tensor_out, + tensor_in, + dist.ReduceOp.SUM, + group_id, + False, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + return tensor_out + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce_scatter_tensor" + ) + @skip_if_no_gpu + def test_reduce_scatter_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_out = torch.zeros(size, dtype=torch.int64) + + # 
Concatenated input + tensor_in = torch.arange(len(group) * size) + tensor_out = self._reduce_scatter_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + # Check result + expected_tensor = torch.arange(rank * size, (rank + 1) * size) * len(group) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + # Stacked input + tensor_in = torch.reshape(tensor_in, (len(group), size)) + tensor_out = self._reduce_scatter_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + # Check result + # Should be the same as the result in concatenated case + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + @skip_if_no_gpu + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + def test_all_reduce_result_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + for src in group: + if rank == src: + tensor = _build_tensor(src + 1, 2) + else: + tensor = _build_tensor(src + 1, 10) + tensor = tensor.cuda(rank_to_GPU[rank][0]) + + opts = AllreduceOptions() + opts.reduceOp = dist.ReduceOp.SUM + + if group_id == GroupMember.WORLD: + work = _get_default_group().allreduce([tensor], opts) + else: + work = group_id.allreduce([tensor], opts) + + if BACKEND == "gloo": + # Calling result right the work is finished should throw exception. + # Here we have a race condition, we may not assume the work is not + # finished by the time we run next lines. + try: + with self.assertRaisesRegex( + RuntimeError, + "Work needs to be completed before calling result", + ): + work.result() + except AssertionError: + # Exception was not raised, ensure is_completed() + self.assertTrue(work.is_completed()) + + work.wait() + result = work.result() + else: + # In case of NCCL we should be able to retrieve pointer to the result + # even before work is finished. + result = work.result() + work.wait() + + expected_value = 2 + (10 * (len(group) - 1)) + self.assertEqual(result, [_build_tensor(src + 1, expected_value)]) + self._barrier() + + def call_dist_op( + self, + profiling_title_postfix, + is_async, + op, + *args, + expect_event=True, + secondary_op_call=None, + profile_cuda=False, + tensor_shapes=None, + **kwargs, + ): + op_calls = [lambda: op(*args, **kwargs)] + if secondary_op_call is not None: + op_calls.append(secondary_op_call) + + autograd_profiler_ctx = torch.autograd.profiler.profile( + use_cuda=profile_cuda, record_shapes=True + ) + + # TODO: move this test to use torch.profiler once kineto issues are + # fixed internally. + with autograd_profiler_ctx as prof: + works = [op_call() for op_call in op_calls] + if is_async: + for work in works: + work.wait() + + if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS: + # We are only interested in the backend's implementation not the dispatcher wrapper. 
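+ # The lookup key is the backend name plus the postfix, e.g. an all_reduce
+ # issued under gloo is matched as "gloo:all_reduce" rather than the generic
+ # c10d dispatcher entry.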
+ events = get_profiling_event( + dist.get_backend() + profiling_title_postfix, autograd_profiler_ctx + ) + # DETAIL debug mode can use a pg wrapper that issues more collectives + # under the hood + if dist.get_debug_level() != dist.DebugLevel.DETAIL: + self.assertEqual(len(events), len(op_calls)) + for e in events: + self.assertTrue(e.is_async) + self.assertEqual(e.count, 1) + self.assertGreaterEqual(e.cpu_time, 0) + # Verify tensor shapes if given + # DETAIL debug mode can use a pg wrapper that issues more collectives + # under the hood + if ( + tensor_shapes is not None + and dist.get_debug_level() != dist.DebugLevel.DETAIL + ): + self.assertEqual( + e.input_shapes, + tensor_shapes, + f"event shape: {e.input_shapes} vs tensor {tensor_shapes}", + ) + + # ALL REDUCE + def _test_all_reduce_helper( + self, + group, + group_id, + rank, + op, + master_value, + worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + dtype=torch.float, + async_op=False, + ): + for src in group: + curr_value = master_value if rank == src else worker_value + + tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + if tensor.dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensor).shape] + else: + tensor_shapes = [tensor.shape] + self.call_dist_op( + ":all_reduce", + async_op, + dist.all_reduce, + tensor, + op, + group_id, + async_op=async_op, + tensor_shapes=tensor_shapes, + ) + # Currently, only Gloo backend has profiling tested with CUDA enabled. + # Only run cuda profiling test for one rank to speed up since + # running with different src_rank does not affect the correctness. + if ( + src == 0 + and cuda + and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS + ): + self.call_dist_op( + ":all_reduce", + async_op, + dist.all_reduce, + tensor, + op, + group_id, + async_op=async_op, + profile_cuda=True, + tensor_shapes=tensor_shapes, + ) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum_async(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + async_op=True, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda_async(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + 
dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + True, + rank_to_GPU, + async_op=True, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + complex(2, 3), + complex(10, 11), + complex(2, 3) + (complex(10, 11) * (len(group) - 1)), + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_complex_unsupported_ops(self): + unsupported_ops = [ + dist.ReduceOp.MAX, + dist.ReduceOp.MIN, + dist.ReduceOp.PRODUCT, + dist.ReduceOp.BAND, + dist.ReduceOp.BOR, + dist.ReduceOp.BXOR, + ] + group, group_id, rank = self._init_global_test() + for unsupported_op in unsupported_ops: + with self.assertRaisesRegex( + ValueError, "all_reduce does not support" + ): + dist.all_reduce( + _build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda_complex(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + complex(2, 3), + complex(10, 11), + complex(2, 3) + (complex(10, 11) * (len(group) - 1)), + True, + rank_to_GPU, + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_product(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_min(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_max(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2), + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, group_id, rank, 
dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + # SPARSE ALL REDUCE + def _test_sparse_all_reduce_sum(self, fn): + group, group_id, rank = self._init_global_test() + + tests = simple_sparse_reduce_tests( + rank, dist.get_world_size(), num_inputs=1 + ) + for (inputs, outputs) in tests: + tensors = [fn(input) for input in inputs] + dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id) + self.assertEqual(tensors[0], outputs[0]) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only Gloo backend support sparse all reduce" + ) + def test_sparse_all_reduce_sum(self): + self._test_sparse_all_reduce_sum(lambda t: t) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only Gloo backend support sparse all reduce" + ) + @skip_if_no_gpu + def test_sparse_all_reduce_sum_cuda(self): + self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda()) + + # ALL REDUCE - COALESCED + @staticmethod + def _all_reduce_coalesced_sum_test_cases(group_size): + return ( + [2, 3, complex(2, 3)], + [10, 11, complex(10, 11)], + [ + 2 + 10 * (group_size - 1), + 3 + 11 * (group_size - 1), + complex(2, 3) + complex(10, 11) * (group_size - 1), + ], + [torch.float, torch.float, torch.cfloat], + ) + + @staticmethod + def _all_reduce_coalesced_product_test_cases(group_size): + return ( + [1, 2], + [3, 4], + [1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)], + [torch.float, torch.float], + ) + + @staticmethod + def _all_reduce_coalesced_min_test_cases(group_size): + return ( + [1, 4], + [2, 3], + [1, 3], + [torch.float, torch.float], + ) + + @staticmethod + def _all_reduce_coalesced_max_test_cases(group_size): + return ( + [1, 4], + [2, 3], + [2, 4], + [torch.float, torch.float], + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_coalesced_max_complex_unsupported(self): + group, group_id, rank = self._init_global_test() + with self.assertRaisesRegex(ValueError, "all_reduce does not support"): + 
dist.all_reduce_coalesced( + [_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id + ) + + def _test_all_reduce_coalesced_helper( + self, + group, + group_id, + rank, + op, + cuda=False, + rank_to_GPU=None, + ): + test_case_func = { + dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases, + dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases, + dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases, + dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases, + }[op] + + master_values, worker_values, expected_values, dtypes = test_case_func( + len(group) + ) + + for src in group: + curr_values = master_values if rank == src else worker_values + tensors = [ + _build_tensor(src + 1, val, dtype=dtype) + for dtype, val in zip(dtypes, curr_values) + ] + if cuda: + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + tensor_shapes = [] + for tensor in tensors: + if tensor.dtype == torch.complex64: + tensor_shapes.append(torch.view_as_real(tensor).shape) + else: + tensor_shapes.append(tensor.shape) + self.call_dist_op( + ":all_reduce", + False, + dist.all_reduce_coalesced, + tensors, + op, + group_id, + tensor_shapes=tensor_shapes, + ) + expected_tensors = [ + _build_tensor(src + 1, expected_value, dtype=dtype) + for dtype, expected_value in zip(dtypes, expected_values) + ] + self.assertEqual(tensors, expected_tensors) + + self._barrier() + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_sum(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_product(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_min(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.MIN, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_max(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_max(self): + group, group_id, rank = self._init_group_test() + 
self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.MIN, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + # SCATTER + def _test_scatter_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + for dest in group: + tensor = _build_tensor(dest + 1, -1, dtype=dtype) + expected_tensor = _build_tensor(dest + 1, rank, dtype=dtype) + tensors = ( + [_build_tensor(dest + 1, i, dtype=dtype) for i in group] + if rank == dest + else [] + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + if dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(t).shape for t in tensors] + else: + tensor_shapes = [t.shape for t in tensors] + self.call_dist_op( + ":scatter", + False, + dist.scatter, + tensor, + src=dest, + scatter_list=tensors, + group=group_id, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + self.assertEqual(tensor, expected_tensor) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_checks(self): + group, group_id, rank = self._init_global_test() + one = torch.ones([1]) + + # Specify scatter_list argument only on source rank. + output = one.clone() * -1 + if rank == 0: + scatter_list = [one.clone() * i for i in group] + dist.scatter(output, src=0, scatter_list=scatter_list) + else: + dist.scatter(output, src=0) + self.assertEqual(output, one * rank) + + # Don't specify src argument. 
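+ # dist.scatter defaults src to 0, so the call below exercises the same code
+ # path as above without naming the source rank explicitly.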
+ output = one.clone() * -1 + if rank == 0: + scatter_list = [one.clone() * i for i in group] + dist.scatter(output, scatter_list=scatter_list) + else: + dist.scatter(output) + self.assertEqual(output, one * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter(self): + group, group_id, rank = self._init_global_test() + self._test_scatter_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_scatter_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_scatter_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_complex(self): + group, group_id, rank = self._init_global_test() + self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_scatter_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_scatter_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @skip_if_small_worldsize + def test_scatter_group(self): + group, group_id, rank = self._init_group_test() + self._test_scatter_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_scatter_helper(group, group_id, rank) + + # GATHER + def _test_gather_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None + ): + for dest in group: + tensor = _build_tensor(dest + 1, rank) + tensors = ( + [_build_tensor(dest + 1, -1) for i in group] if rank == dest else [] + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + self.call_dist_op( + ":gather", + False, + dist.gather, + tensor, + dst=dest, + gather_list=tensors, + group=group_id, + expect_event=False, + tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None, + ) + if rank == dest: + expected_tensors = [_build_tensor(dest + 1, i) for i in group] + for t1, t2 in zip(tensors, expected_tensors): + self.assertEqual(t1, t2) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather_checks(self): + group, group_id, rank = self._init_global_test() + one = torch.ones([1]) + + # Specify gather_list argument only on destination rank. 
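+ # Only the destination rank passes a gather_list to receive into; every other
+ # rank calls dist.gather with just its input tensor.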
+ if rank == 0: + gather_list = [one.clone() for _ in group] + dist.gather(one * rank, dst=0, gather_list=gather_list) + for i in group: + self.assertEqual(gather_list[i], one * i) + else: + dist.gather(one * rank, dst=0) + + # Don't specify dst argument. + if rank == 0: + gather_list = [one.clone() for _ in group] + dist.gather(one * rank, gather_list=gather_list) + for i in group: + self.assertEqual(gather_list[i], one * i) + else: + dist.gather(one * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather(self): + group, group_id, rank = self._init_global_test() + self._test_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_gather_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_gather_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @skip_if_small_worldsize + def test_gather_group(self): + group, group_id, rank = self._init_group_test() + self._test_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_gather_helper(group, group_id, rank) + + # ALL GATHER + def _test_all_gather_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + for dest in group: + tensor = _build_tensor(dest + 1, rank, dtype=dtype) + tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in group] + allgather = dist.all_gather + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + if tensors[0].dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensors[0]).shape] + else: + tensor_shapes = [tensors[0].shape] + self.call_dist_op( + ":all_gather", + False, + allgather, + tensors, + tensor, + group_id, + False, + tensor_shapes=tensor_shapes, + ) + + expected_tensors = [ + _build_tensor(dest + 1, i, dtype=dtype) for i in group + ] + for t1, t2 in zip(tensors, expected_tensors): + self.assertEqual(t1, t2) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all gather" + ) + @skip_if_no_gpu + def test_all_gather_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat) + + 
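+ # The all_gather variants below additionally cover complex CUDA tensors,
+ # subgroup and full-group participation, uneven output splits (all_gather_v),
+ # and the single-output-tensor form (all_gather_into_tensor) in both
+ # concatenated and stacked layouts.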
@skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all gather" + ) + @skip_if_no_gpu + def test_all_gather_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_gather_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports all_gather_v" + ) + @skip_if_no_gpu + def test_all_gather_v_cuda(self): + self._barrier() + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + output_split_sizes = [] + for dst in group: + output_split_sizes.append(dst + 1) + sum_len = sum(output_split_sizes) + value = 2 + + for async_val in [True, False]: + tensor = ( + torch.empty( + output_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + .fill_(value) + .cuda(device_id) + ) + out_tensor = _build_tensor(sum_len, -1, device_id=device_id) + + req = dist.all_gather( + list(torch.split(out_tensor, output_split_sizes)), + tensor, + group_id, + async_val, + ) + if async_val: + req.wait() + + expected_value = value + expected_tensor = _build_tensor( + sum_len, expected_value, device_id=device_id + ) + + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + # Test all_gather accepting single tensor as output + def _all_gather_into_tensor_helper( + self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None + ): + if cuda: + tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) + tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) + if tensor_out.dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensor_in).shape] + else: + tensor_shapes = [tensor_in.shape] + self.call_dist_op( + ":all_gather_into_tensor", + False, + dist.all_gather_into_tensor, + tensor_out, + tensor_in, + group_id, + False, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + return tensor_out + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor" + ) + @skip_if_no_gpu + def test_all_gather_into_cat_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_in = torch.ones([size, size]) * rank + # Concatenated output + tensor_out = torch.ones([len(group) * size, size]) * (-1) + tensor_out = self._all_gather_into_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + + # Check result + # Concatenate all blocks into a bigger tensor + expected_tensor = torch.cat([torch.ones([size, size]) * i for i in group]) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor" + ) + @skip_if_no_gpu + def test_all_gather_into_stack_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + 
rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_in = torch.ones([size, size]) * rank + # Stacked output + tensor_out = torch.ones([len(group), size, size]) * (-1) + tensor_out = self._all_gather_into_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + + # Check result + # Stack all blocks into a bigger tensor + expected_tensor = torch.stack([torch.ones([size, size]) * i for i in group]) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + def _run_all_gather_coalesced_and_verify( + self, output_tensor_lists, input_tensors, expected_tensors, group_id + ): + """ + Helper that runs all_gather_coalesced and returns true if output + matches expectations. + """ + tensor_shapes = [] + for input_tensor in input_tensors: + if input_tensor.dtype == torch.complex64: + tensor_shapes.append(torch.view_as_real(input_tensor).shape) + else: + tensor_shapes.append(input_tensor.shape) + self.call_dist_op( + ":all_gather", + False, + dist.all_gather_coalesced, + output_tensor_lists, + input_tensors, + group_id, + tensor_shapes=tensor_shapes, + ) + + for l1, l2 in zip(output_tensor_lists, expected_tensors): + for t1, t2 in zip(l1, l2): + if not torch.equal(t1, t2): + return False + return True + + def _test_all_gather_coalesced_helper( + self, group, group_id, rank, dtype=torch.float + ): + # TODO: Instead we should probably go through _rank_not_in_group + # mechanism to disable sending tensors + if group_id is not None: + for test_case_id in range(2, 5): + # Make sure we create tensors of incompatible sizes, e.g. + # [1], [2x2], [3x3x3] ... to be sent in one batch + input_tensors = [ + _build_multidim_tensor( + tensor_id, tensor_id, rank + tensor_id, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + output_tensor_lists = [ + [ + _build_multidim_tensor( + tensor_id, tensor_id, -1, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + for _ in group + ] + expected_tensors = [ + [ + _build_multidim_tensor( + tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + for rank_iter in group + ] + assert self._run_all_gather_coalesced_and_verify( + output_tensor_lists, input_tensors, expected_tensors, group_id + ), "output tensors do not match expected ouputs" + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_simple(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_coalesced_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def 
test_all_gather_coalesced_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_with_empty(self): + group, group_id, rank = self._init_global_test() + input_tensors = [ + rank * torch.ones([2, 2]), + torch.ones([0]), + (rank + 1) * torch.ones([3, 3]), + torch.ones([0]), + torch.ones([0]), + ] + output_tensors_lists = [ + [ + -1 * torch.ones([2, 2]), + -1 * torch.ones([0]), + -1 * torch.ones([3, 3]), + -1 * torch.ones([0]), + -1 * torch.ones([0]), + ] + for _ in group + ] + expected_tensors = [ + [ + r * torch.ones([2, 2]), + torch.ones([0]), + (r + 1) * torch.ones([3, 3]), + torch.ones([0]), + torch.ones([0]), + ] + for r in group + ] + assert self._run_all_gather_coalesced_and_verify( + output_tensors_lists, input_tensors, expected_tensors, group_id + ) + self._barrier() + + # AllToAll + def _test_all_to_all_single_equal_split_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + if group_id is not None: + size = len(group) + in_tensor = torch.ones([size, size], dtype=dtype) * rank + expected_tensor = torch.cat( + [torch.ones([1, size], dtype=dtype) * i for i in group] + ) + out_tensor = torch.ones([size, size], dtype=dtype) * -1 + if cuda: + in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) + if dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(in_tensor).shape] + else: + tensor_shapes = [in_tensor.shape] + self.call_dist_op( + ":all_to_all", + False, + dist.all_to_all_single, + out_tensor, + in_tensor, + group=group_id, + tensor_shapes=tensor_shapes, + ) + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + def _test_all_to_all_single_unequal_split_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + if group_id is not None: + size = len(group) + in_splits = [i + 1 for i in group] + out_splits = [rank + 1 for _ in group] + in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank + out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype) + expected_tensor = torch.cat( + [torch.ones([rank + 1, size], dtype=dtype) * i for i in group] + ) + if cuda: + in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) + dist.all_to_all_single( + out_tensor, in_tensor, out_splits, in_splits, group=group_id + ) + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + def _test_all_to_all_helper( + self, + group, + group_id, + rank, + cuda=False, + rank_to_GPU=None, + dtype=torch.float, + ): + if group_id is not None: + size = len(group) + in_splits = [i + 1 for i in group] + in_tensors = [ + torch.ones([in_splits[i], size], dtype=dtype) * rank + for i, _ in enumerate(group) + ] + out_tensors = [ + torch.ones([(rank + 1), size], dtype=dtype) for _ in group + ] + expected_tensors = [ + torch.ones([rank + 1, size], dtype=dtype) * i for i in group + ] + if cuda: + in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors] + expected_tensors = [ + t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors + ] + out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors] + 
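+ # all_to_all sends in_tensors[i] (shape [i + 1, size], filled with this rank)
+ # to peer i and receives a [rank + 1, size] block from each peer, so
+ # out_tensors[i] should come back filled with the value i.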
dist.all_to_all(out_tensors, in_tensors, group=group_id) + for t1, t2 in zip(out_tensors, expected_tensors): + self.assertEqual(t1, t2) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_equal_split_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_unequal_split_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm + def test_all_to_all_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, 
True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm + def test_all_to_all_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + @skip_if_small_worldsize + def test_all_to_all_single_equal_split_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + @skip_if_small_worldsize + def test_all_to_all_single_equal_split_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + @skip_if_small_worldsize + def test_all_to_all_single_unequal_split_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + @skip_if_small_worldsize + def test_all_to_all_single_unequal_split_group_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + @skip_if_small_worldsize + def test_all_to_all_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_small_worldsize + @skip_if_rocm + def test_all_to_all_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only 
MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm + def test_all_to_all_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + # BARRIER + def _test_barrier_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None + ): + WAIT_TIME = 0.3 # seconds + + for dest in group: + expected_time = torch.DoubleTensor(1).fill_(0.0) + if cuda: + expected_time = expected_time.cuda(rank_to_GPU[rank][0]) + if dest == rank: + expected_time.fill_(time.time() + WAIT_TIME) + dist.broadcast(expected_time, dest, group_id) + time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer + dist.barrier(group_id) + else: + dist.broadcast(expected_time, dest, group_id) + dist.barrier(group_id) + self.assertGreaterAlmostEqual( + float(time.time()), + float(expected_time[0]), + "destination rank: %d, my rank: %d" % (dest, rank) + + " (if you see this failure, please report in #14554)", + ) + + # Use higher timeout for the instance where the test runs + # against a subgroup and uses a CUDA tensor for expected time. + # The CUDA initialization for the participating processes can + # take long enough for the barrier timeout to trigger on the + # process that doesn't participate in the group. 
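+        # (Timing recap, illustrative: the destination rank broadcasts
+        # t0 + WAIT_TIME, sleeps WAIT_TIME + 0.1 s, and only then enters the
+        # barrier, so every other rank should observe time.time() >= t0 + WAIT_TIME
+        # once the barrier releases, which assertGreaterAlmostEqual checks above.)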
+ self._barrier(timeout=20) + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND == "mpi", "MPI doesn't supports GPU barrier" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + def test_barrier_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND == "mpi", "MPI doesn't supports GPU barrier" + ) + def test_barrier_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND == "mpi", "MPI doesn't supports GPU barrier" + ) + def test_barrier_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["cpu barrier"], + f"{BACKEND} does not support CPU barrier", + ) + def test_barrier(self): + group, group_id, rank = self._init_global_test() + self._test_barrier_helper(group, group_id, rank) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["cpu barrier"], + f"{BACKEND} does not support CPU barrier", + ) + def test_barrier_group(self): + group, group_id, rank = self._init_group_test() + self._test_barrier_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["cpu barrier"], + f"{BACKEND} does not support CPU barrier", + ) + def test_barrier_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_barrier_helper(group, group_id, rank) + + def _model_step(self, model): + for param in model.parameters(): + if param.grad is not None: + with torch.no_grad(): + param += param.grad + param.grad = None + + def _model_step_with_zero_grad(self, model): + for param in model.parameters(): + if param.grad is not None: + with torch.no_grad(): + param += param.grad + param.grad.requires_grad_(False) + param.grad.zero_() + + def _prepare_dummy_data(self, local_bs): + # global_bs for DDP should be divisible by WORLD_SIZE + world_size = int(os.environ["WORLD_SIZE"]) + global_bs = world_size * local_bs + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 4) + loss = nn.MSELoss() + return global_bs, input_cpu, target, loss + + # END TO END TEST FOR DISTRIBUTEDDATAPARALLEL + def _test_DDP_helper( + self, model, input_var, target, loss, scale_factor=1.0, memory_format=None + ): + model.train() + output = model(input_var) + l = loss(output, target) * scale_factor + l.backward() + if memory_format is not None: + self.assertTrue(output.is_contiguous(memory_format=memory_format)) + + def _assert_equal_param(self, param_gpu, param_DDP): + self.assertEqual(len(param_gpu), len(param_DDP)) + for p_gpu, p_DDP in zip(param_gpu, param_DDP): + self.assertEqual(p_gpu, p_DDP) + + def _test_DDP_niter( + self, + model_base, + model_DDP, + input, + target, + loss, + local_bs, + rank, + batch_size, + test_save, + offset=None, + world_size=0, + zero_grad=False, + memory_format=None, + n_iter=5, + ): + for 
idx in range(n_iter): + # single cpu/gpu training + self._test_DDP_helper( + model_base, input, target, loss, memory_format=memory_format + ) + + if offset is None: + offset = rank * local_bs + + # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + world_size * local_bs / batch_size if world_size != 0 else 1, + memory_format=memory_format, + ) + + # Update weights and run a second iteration to shake out errors + if zero_grad: + self._model_step_with_zero_grad(model_base) + self._model_step_with_zero_grad(model_DDP) + else: + self._model_step(model_base) + self._model_step(model_DDP) + self._assert_equal_param( + list(model_base.parameters()), list(model_DDP.module.parameters()) + ) + + # Shuffle the input so that DDP input is different + input = input[torch.randperm(batch_size)] + + # save the model in the middle and reload + if test_save and idx == 2 and INIT_METHOD.startswith("file://"): + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + model_DDP = torch.load(tmp) + else: + torch.save(model_DDP, tmp.name) + model_DDP = torch.load(tmp.name) + + with tempfile.TemporaryFile() as tmp_file: + torch.save(model_DDP, tmp_file) + tmp_file.seek(0) + saved_model = torch.load(tmp_file) + for k in model_DDP.state_dict(): + self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k]) + + def _test_DistributedDataParallel( + self, + gpu_subset, + rank, + output_device=None, + gradient_as_bucket_view=False, + static_graph=False, + set_static_graph_twice=False, + ): + # Run a simple end to end DDP model, use result of single node model + # as baseline + + # cpu training setup + model = DDP_NET + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpu_subset[0]) + + # DDP training setup + model_DDP = copy.deepcopy(model) + model_DDP.cuda(gpu_subset[0]) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, + device_ids=gpu_subset, + gradient_as_bucket_view=gradient_as_bucket_view, + static_graph=static_graph, + ) + + if set_static_graph_twice: + model_DDP._set_static_graph() + + # test serializable/unserializable + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + model_DDP = torch.load(tmp) + else: + torch.save(model_DDP, tmp.name) + model_DDP = torch.load(tmp.name) + + # dummy data initialization + local_bs = len(gpu_subset) + global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs) + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpu_subset[0]), + target.cuda(gpu_subset[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False): + # Run a simple end to end DDP-CPU model, use result of single node + # model as baseline + group, group_id, rank = self._init_global_test() + + # cpu training setup + model_base = DDP_NET + + # DDP-CPU training setup + model_DDP = copy.deepcopy(model_base) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, gradient_as_bucket_view=gradient_as_bucket_view + ) + + # dummy data initialization + local_bs = 2 + global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs) + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_base, + 
model_DDP, + input_cpu, + target, + loss, + local_bs, + rank, + global_bs, + False, + zero_grad=True, + ) + self._barrier() + + return model_DDP + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_DistributedDataParallelCPU(self): + self._test_DistributedDataParallelCPU() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_DistributedDataParallelCPU_grad_is_view(self): + self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_DistributedDataParallel_requires_grad(self): + # a module without gradients shouldn't be accepted + self.assertRaises( + RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module()) + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_zero_output_features(self): + class ToyModel(nn.Module): + def __init__(self): + super().__init__() + self.net1 = nn.Linear(10, 10) + self.relu = nn.ReLU() + self.net2 = nn.Linear(10, 0) + + model = ToyModel().to(self.rank) + ddp_model = nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank] + ) + + @skip_but_pass_in_sandcastle_if(BACKEND == "nccl", "Gloo-only test") + def test_ddp_create_graph(self): + class Model(nn.Module): + def __init__(self): + super().__init__() + self.p = nn.Parameter(torch.tensor(1.0)) + + def forward(self): + return self.p.pow(2) + + model = Model() + ddp_model = torch.nn.parallel.DistributedDataParallel(model) + for _ in range(6): + # Verify DDP doesn't throw when ran with create_graph=True. + # Although we do warn about potential issues, please see + # https://github.com/pytorch/pytorch/issues/63929 for details. + ddp_model().backward(create_graph=True) + # grad tensors should require grad. + self.assertTrue( + all(param.requires_grad for param in ddp_model.parameters()) + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_DistributedDataParallel_non_default_stream(self): + stream = torch.cuda.Stream(self.rank) + rank = self.rank + with torch.cuda.stream(stream): + net = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank] + ) + for i in range(1000): + # Clear gradients manually + grad = net.module.weight.grad + if grad is not None: + grad.requires_grad_(False) + grad.zero_() + # Forward + BW + batch = torch.tensor([rank]).float().cuda(rank) + loss = net(batch).sum() + loss.backward() + # For each worker, the gradient on the weight should be worker_rank. + grad = net.module.weight.grad + avg = grad.clone() + # All-reducing the gradient averages should give us the gradient + # average. If not, then one of the workers has not correctly + # written back the averaged gradient before this all-reduce call. 
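+                # Worked example (illustrative, world_size == 2): rank 0 feeds the
+                # scalar 0 and rank 1 feeds 1 into the 1x1 linear layer, so after
+                # DDP's gradient averaging both ranks should hold
+                # grad == (0 + 1) / 2, matching expected_grad below.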
+ dist.all_reduce(avg) + world_size = int(os.environ["WORLD_SIZE"]) + avg.div_(world_size) + expected_grad = sum(i for i in range(world_size)) / world_size + self.assertEqual( + avg[0, 0], + expected_grad, + msg=f"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}", + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_comm_hook_logging(self): + hooks = [ + default.allreduce_hook, + default.fp16_compress_hook, + powerSGD.powerSGD_hook, + powerSGD.batched_powerSGD_hook, + quantization_hooks.quantization_pertensor_hook, + quantization_hooks.quantization_perchannel_hook, + ] + + cpp_builtin_hooks = [ + dist.BuiltinCommHookType.ALLREDUCE, + dist.BuiltinCommHookType.FP16_COMPRESS, + ] + + for hook in hooks: + ddp_model = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(self.rank), + device_ids=[self.rank], + ) + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Hook not registered yet, so should be empty + self.assertEqual(ddp_logging_data.get("comm_hook"), None) + ddp_model.register_comm_hook(None, hook) + ddp_logging_data = ddp_model._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("comm_hook"), hook.__qualname__) + + for hook in cpp_builtin_hooks: + ddp_model = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(self.rank), + device_ids=[self.rank], + ) + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Hook not registered yet, so should be empty + self.assertEqual(ddp_logging_data.get("comm_hook"), None) + ddp_model._register_builtin_comm_hook(hook) + ddp_logging_data = ddp_model._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("comm_hook"), str(hook)) + + # No hook registered + ddp_model = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(self.rank), + device_ids=[self.rank], + ) + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Hook not registered yet, so should be empty + self.assertEqual(ddp_logging_data.get("comm_hook"), None) + # After second forward pass, hook should still be empty string + for i in range(2): + inp = torch.ones(1, 1, device=self.rank) + loss = ddp_model(inp).sum() + loss.backward() + + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Note: DETAIL debug mode logs DDP logging data to stdout and + # thus accesses std::map, which fills in a default value for the + # type if it didn't exist. + self.assertEqual(ddp_logging_data.get("comm_hook", ""), "") + + def _test_ddp_hook_with_optimizer_parity( + self, + grad_as_bucket_view, + static_graph, + optim_cls, + optimize_subset, + *functional_optim_args, + **functional_optim_kwargs, + ): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + models_to_test = [ + (LargeNet(), torch.randn(1, 1000).cuda()), + ] + if HAS_TORCHVISION: + models_to_test.append( + (torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda()) + ) + for (model, inp) in models_to_test: + # Enable determinism in cudnn operators + with torch.backends.cudnn.flags( + enabled=True, deterministic=True, benchmark=False + ): + # Create DDP model that runs optimizer in fused fashion. 
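+                # Test plan (summary): fuse the optimizer step into the allreduce
+                # hook on one DDP replica via the private _register_fused_optim
+                # API, run an identical replica with a plain post-backward
+                # optimizer, and assert their parameters stay equal throughout.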
+ ddp_model_with_optimizer_hook = ( + torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_as_bucket_view, + static_graph=static_graph, + ) + ) + + # Create DDP model with no hook that does optimizer after + # backward. + ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_as_bucket_view, + static_graph=static_graph, + ) + hook_params = ddp_model_with_optimizer_hook.parameters() + no_hook_params = ddp_model_with_no_hook.parameters() + if optimize_subset: + hook_params = list(hook_params) + no_hook_params = list(no_hook_params) + self.assertGreater(len(hook_params), 0) + hook_params = [hook_params[0]] + no_hook_params = [no_hook_params[0]] + + # Register a fused optimizer that will run optimizer in step + # with allreduce. + + if optimize_subset: + # API where optim_params is specified. + ddp_model_with_optimizer_hook._register_fused_optim( + optim_cls, + *functional_optim_args, + optim_params=hook_params, + **functional_optim_kwargs, + ) + else: + # API where optim_params is omitted + ddp_model_with_optimizer_hook._register_fused_optim( + optim_cls, + *functional_optim_args, + **functional_optim_kwargs, + ) + + optimizer_no_hook = optim_cls( + no_hook_params, + *functional_optim_args, + **functional_optim_kwargs, + ) + + # Verify parameters are equal initially. + for hook_param, allreduce_param in zip( + ddp_model_with_optimizer_hook.parameters(), + ddp_model_with_no_hook.parameters(), + ): + self.assertEqual(hook_param, allreduce_param) + + # Save old parameters to later verify optimizer modified them. + opt_hook_init_params = copy.deepcopy( + list(ddp_model_with_optimizer_hook.parameters()) + ) + + # Run optimizer with hook model. + for i in range(6): + ddp_model_with_optimizer_hook.zero_grad() + out = ddp_model_with_optimizer_hook(inp) + loss = out.sum() + loss.backward() + + dist.barrier() + + # Run regular model. + for i in range(6): + ddp_model_with_no_hook.zero_grad() + out = ddp_model_with_no_hook(inp) + loss = out.sum() + loss.backward() + optimizer_no_hook.step() + + dist.barrier() + + # Now verify parameters are equal. + for hook_param, allreduce_param in zip( + ddp_model_with_optimizer_hook.parameters(), + ddp_model_with_no_hook.parameters(), + ): + self.assertEqual(hook_param, allreduce_param) + + # Verify optimizer modified appropriate parameter set, + # otherwise they'd be trivially equal above. 
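+                # (When optimize_subset is True only the first parameter was
+                # handed to the fused optimizer, so the remaining parameters
+                # should still match their initial snapshot in
+                # opt_hook_init_params.)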
+ if optimize_subset: + self.assertNotEqual( + opt_hook_init_params[0], + next(iter(ddp_model_with_optimizer_hook.parameters())), + ) + # Untouched params should be equal + self.assertEqual( + opt_hook_init_params[1:], + list(ddp_model_with_optimizer_hook.parameters())[1:], + ) + else: + self.assertNotEqual( + opt_hook_init_params, + list(ddp_model_with_optimizer_hook.parameters()), + ) + dist.barrier() + + """ + # Commenting out the following 3 tests as they cause Sandcastle jobs to fail + # Failure signature: + # AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_adamw + + from torch.testing._internal.common_utils import parametrize + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl" or BACKEND == "ucc", + "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", + ) + @skip_if_lt_x_gpu(2) + @parametrize("grad_as_bucket_view", [True, False]) + @parametrize("static_graph", [True, False]) + @parametrize("optimize_subset", [True, False]) + def test_ddp_hook_with_optimizer_parity_adamw( + self, + grad_as_bucket_view, + static_graph, + optimize_subset, + ): + adamw_lr = 1e-2 + adamw_betas = (0.9, 0.99) + adamw_eps = 1e-6 + self._test_ddp_hook_with_optimizer_parity( + grad_as_bucket_view, + static_graph, + torch.optim.AdamW, + optimize_subset, + adamw_lr, + betas=adamw_betas, + eps=adamw_eps, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl" or BACKEND == "ucc", + "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", + ) + @skip_if_lt_x_gpu(2) + @parametrize("optimize_subset", [True, False]) + def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset): + adam_lr = 1e-2 + adam_betas = (0.9, 0.99) + adam_eps = 1e-6 + self._test_ddp_hook_with_optimizer_parity( + True, # grad as bucket view + False, # static graph + torch.optim.Adam, + optimize_subset, + adam_lr, + betas=adam_betas, + eps=adam_eps, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl" or BACKEND == "ucc", + "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", + ) + @skip_if_lt_x_gpu(2) + @parametrize("optimize_subset", [True, False]) + def test_ddp_hook_with_optimizer_parity_sgd(self, optimize_subset): + sgd_lr = 1e-2 + sgd_momentum = 0.9 + sgd_weight_decay = 0.01 + # Not testing grad_as_bucket_view and static_graph as they are + # tested in AdamW test above. + self._test_ddp_hook_with_optimizer_parity( + True, # grad as bucket view + False, # static_graph + torch.optim.SGD, + optimize_subset, + sgd_lr, + momentum=sgd_momentum, + weight_decay=sgd_weight_decay, + ) + """ + + @skip_if_lt_x_gpu(2) + def test_get_data_parallel_params(self): + torch.cuda.set_device(self.rank) + model = TwoLinLayerNet().cuda() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank] + ) + dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params( + model, named_params=True + ) + for name, _ in dp_params: + self.assertNotEqual(f"module.{params_to_ignore[0]}", name) + + # test named_params=False, just check if returns the expected + # no of parameters. 
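+        # (The "- 1" below accounts for the single ignored parameter,
+        # "a.weight", registered via _set_params_and_buffers_to_ignore_for_model
+        # above.)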
+ num_ddp_params = len(list(model.parameters())) - 1 + count = 0 + dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params(model, named_params=False) + for _ in dp_params: + count += 1 + self.assertEqual(count, num_ddp_params) + + def _test_ddp_apply_optim_in_backward( + self, + optim_cls, + optim_kwargs, + init_before, + gradient_as_bucket_view=True, + ): + # Need to seed to ensure inputs are unique across rank. Otherwise, + # allreduce won't have any effect. + torch.manual_seed(self.rank) + torch.cuda.manual_seed(self.rank) + torch.cuda.set_device(self.rank) + + # Test a simple linear as well as a ResNet model. + models_to_test = [ + nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3), nn.Linear(3, 3)).cuda() + ] + if HAS_TORCHVISION: + models_to_test.append(torchvision.models.resnet50().cuda()) + + for j, model in enumerate(models_to_test): + model_optim_in_bwd = copy.deepcopy(model) + model = nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + gradient_as_bucket_view=gradient_as_bucket_view, + ) + optim = optim_cls(model.parameters(), **optim_kwargs) + if init_before: + _apply_optimizer_in_backward( + optimizer_class=optim_cls, + params=model_optim_in_bwd.parameters(), + optimizer_kwargs=optim_kwargs, + ) + model_optim_in_bwd = nn.parallel.DistributedDataParallel( + model_optim_in_bwd, + device_ids=[self.rank], + gradient_as_bucket_view=gradient_as_bucket_view, + ) + if not init_before: + _apply_optimizer_in_backward( + optimizer_class=optim_cls, + params=model_optim_in_bwd.parameters(), + optimizer_kwargs=optim_kwargs, + ) + + for p1, p2 in zip(model.parameters(), model_optim_in_bwd.parameters()): + self.assertEqual(p1, p2, "Parameters not initially equal!") + # Enable determinism in cudnn operators + with torch.backends.cudnn.flags( + enabled=True, deterministic=True, benchmark=False + ): + for i in range(8): + inp = ( + torch.randn(1, 3, 1000, 1000, device="cuda") + if j == 1 + else torch.randn(10, 3, device="cuda") + ) + model(inp).sum().backward() + optim.step() + model_optim_in_bwd( + inp + ).sum().backward() # runs optimizer as well + for p1, p2 in zip( + model.parameters(), model_optim_in_bwd.parameters() + ): + self.assertEqual( + p1, p2, f"Params not equal at iteration {i}" + ) + self.assertTrue( + p2.grad is None, + f"Optim in backward grad is not None at {i}", + ) + + # set_to_none for regular optimizer to match in backward + # case. 
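+                    # (The in-backward optimizer consumes each gradient during
+                    # backward, so p2.grad is expected to stay None, as asserted
+                    # above; set_to_none=True keeps the baseline optimizer's
+                    # gradient state comparable.)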
+ optim.zero_grad(set_to_none=True) + + @skip_if_lt_x_gpu(2) + def test_ddp_apply_optim_in_backward(self): + for optim_cls, init_before in itertools.product( + [torch.optim.SGD, torch.optim.Adam], [True, False] + ): + with self.subTest(optim_cls=optim_cls): + self._test_ddp_apply_optim_in_backward( + optim_cls=optim_cls, + optim_kwargs={"lr": 0.03}, + init_before=init_before, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_apply_optim_in_backward_grad_as_bucket_view_false(self): + for init_before in [True, False]: + self._test_ddp_apply_optim_in_backward( + optim_cls=torch.optim.SGD, + optim_kwargs={"lr": 0.03}, + init_before=init_before, + gradient_as_bucket_view=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_apply_optim_in_backward_ignored_params(self): + torch.cuda.set_device(self.rank) + for init_before in [True, False]: + with self.subTest(init_before=init_before): + torch.manual_seed(self.rank) + torch.cuda.manual_seed(self.rank) + model = TwoLinLayerNet() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + if init_before: + _apply_optimizer_in_backward( + optimizer_class=torch.optim.SGD, + params=model.parameters(), + optimizer_kwargs={"lr": 0.03}, + ) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + if not init_before: + _apply_optimizer_in_backward( + optimizer_class=torch.optim.SGD, + params=model.parameters(), + optimizer_kwargs={"lr": 0.03}, + ) + inp = torch.randn(1, 10) + a, b = net(inp) + (a.transpose(0, 1) @ b).sum().backward() + # a.weight did not go through allreduce, so optimizer acted on local + # gradient, which should be different across ranks. Remaining params + # should be equal. + models = [None for _ in range(dist.get_world_size())] + dist.all_gather_object(models, model) + rank0_model, remainder = models[0], models[1:] + for m in remainder: + self.assertNotEqual(rank0_model.a.weight, m.a.weight) + self.assertEqual( + list(rank0_model.b.parameters()), list(m.b.parameters()) + ) + self.assertEqual(rank0_model.a.bias, m.a.bias) + + def _get_fp16_config(self) -> _MixedPrecision: + return _MixedPrecision( + param_dtype=torch.float16, + reduce_dtype=torch.float16, + buffer_dtype=torch.float16, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_ignored_params(self): + rank = self.rank + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + torch.cuda.set_device(rank) + model = TwoLinLayerNet() + model.register_buffer("buffer", torch.ones(5)) + # Parameters to ignore are in the format {module_name}.{param_name} + to_ignore = ["a.weight", "buffer"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, to_ignore, + ) + mp_config = self._get_fp16_config() + net = torch.nn.parallel.DistributedDataParallel( + model.to(rank), + device_ids=[rank], + mixed_precision=mp_config, + gradient_as_bucket_view=True, + ) + to_ignore = [f"module.{name}" for name in to_ignore] + expected_ignored = len(to_ignore) + n_ignored = 0 + # ignored params should not have _mp_param or _fp_param fields. 
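+        # (Names are compared against their DDP-prefixed form, e.g.
+        # "module.a.weight", since the wrapped net reports parameters and
+        # buffers under the "module." prefix.)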
+ for (n, p) in itertools.chain(net.named_parameters(), net.named_buffers()): + if n in to_ignore: + n_ignored += 1 + self.assertFalse(hasattr(p, '_mp_param')) + self.assertFalse(hasattr(p, '_fp_param')) + else: + self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) + self.assertEqual(torch.float32, p._fp_param.dtype) + + self.assertEqual(expected_ignored, n_ignored) + + def _test_ddp_native_mixed_precision( + self, gradient_as_bucket_view, set_grad_to_none + ): + rank = self.rank + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + torch.cuda.set_device(rank) + inp = torch.randn(10, 1) + mp_config = self._get_fp16_config() + + class MyModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.m = torch.nn.Linear(1, 5) + self.register_buffer('buffer', torch.randn(1, 2)) + self.p = torch.nn.Parameter( + torch.randn(10, 5), requires_grad=False + ) + + def forward(self_, x): # noqa: B902 + params = self_.m.parameters() + for p in params: + self.assertEqual(mp_config.param_dtype, p.dtype) + + self.assertEqual(self_.buffer.dtype, mp_config.buffer_dtype) + + self.assertEqual(mp_config.param_dtype, x.dtype) + return self_.m(x) + self_.p + + m = MyModel() + + net = torch.nn.parallel.DistributedDataParallel( + m.to(rank), + device_ids=[rank], + mixed_precision=mp_config, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + # Buffers are casted in constructor. + self.assertEqual(net.module.buffer.dtype, mp_config.buffer_dtype) + # Each param should have an mp_param in the lower precision, and + # an fp_param in the higher precision. + for p in net.parameters(): + self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) + self.assertEqual(torch.float32, p._fp_param.dtype) + + for i in range(6): + loss = net(inp).sum() + loss.backward() + # Verify gradient synchronization and params and grads are fp32. 
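+            # (Per the assertions above, each non-ignored parameter carries a
+            # low-precision copy in p._mp_param and an fp32 master copy in
+            # p._fp_param, so the public parameters and gradients inspected
+            # below should remain float32.)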
+ for n, param in net.named_parameters(): + self.assertEqual(param.dtype, torch.float32) + if param.grad is None: + assert n == 'module.p' # Only param that doesn't require grad + else: + self.assertEqual(param.grad.dtype, torch.float32) + tensor_list = [ + torch.zeros_like(param.grad) + for _ in range(dist.get_world_size(net.process_group)) + ] + dist.all_gather(tensor_list, param.grad) + g, rest = tensor_list[0], tensor_list[1:] + self.assertEqual(g.dtype, torch.float32) + for g_ in rest: + self.assertEqual(g_.dtype, torch.float32) + self.assertEqual(g, g_) + net.zero_grad(set_to_none=set_grad_to_none) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_no_grad_as_bucket_view_no_set_grad_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=False, + set_grad_to_none=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_grad_as_bucket_view_no_set_grad_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, + set_grad_to_none=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_grad_as_bucket_view_set_grad_to_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, set_grad_to_none=True + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_no_grad_as_bucket_view_set_grad_to_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, set_grad_to_none=True + ) + + def _test_ddp_hook_parity(self, state, hook, num_validated_iters=100): + rank = self.rank + m = torch.nn.Linear(1, 5) + try: + process_group = state.process_group + except AttributeError: + process_group = state + + net_with_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(m).to(rank), + device_ids=[rank], + process_group=process_group, + ) + net_with_hook.register_comm_hook(state=state, hook=hook) + net_without_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(m).to(rank), + device_ids=[rank], + process_group=process_group, + ) + for i in range(100): + # Clear gradients manually. + for g in [ + net_without_hook.module.weight.grad, + net_with_hook.module.weight.grad, + ]: + if g is not None: + g.requires_grad_(False) + g.zero_() + # Forward + BW + batch = torch.tensor([rank]).float().cuda(rank) + loss = net_without_hook(batch).sum() + loss.backward() + # For each worker, the gradient on the weight should be worker_rank. + grad = net_without_hook.module.weight.grad + avg = grad.clone() + expected_grad = ( + sum(i for i in range(dist.get_world_size())) / dist.get_world_size() + ) + loss_hook = net_with_hook(batch).sum() + loss_hook.backward() + grad_hook = net_with_hook.module.weight.grad + avg_hook = grad_hook.clone() + + if i < num_validated_iters: + # Verify hook grad with expected. 
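+                # (Same arithmetic as the vanilla allreduce path: each rank feeds
+                # its own rank as the input scalar, so the averaged gradient is
+                # sum(range(world_size)) / world_size on every rank.)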
+ self.assertEqual( + avg_hook[0, 0].item(), + expected_grad, + msg=f"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}", + ) + # Verify hook grad with vanilla allreduce + self.assertEqual( + avg_hook[0, 0], + avg[0, 0], + msg=f"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}", + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_allreduce(self): + self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_allreduce_process_group(self): + # process_group is passed in to both DDP and comm. hook + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + gpus = [rank_to_GPU[int(r)][0] for r in range(world_size)] + process_group = torch.distributed.new_group(gpus) + self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_powerSGD(self): + for warm_start in [True, False]: + powersgd_state = powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=2, + warm_start=warm_start, + ) + self._test_ddp_hook_parity( + state=powersgd_state, hook=powerSGD.powerSGD_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_but_pass_in_sandcastle_if( + NO_MULTIPROCESSING_SPAWN, + "Disabled for environments that \ + don't support multiprocessing with spawn start method", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_post_localSGD(self): + # Although we start run local SGD at iteration 10, since we still use the global process group to run it, + # the post-LocalSGD actually still allreduces gradients globally for the remaining iterations. + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + # Only validate the warmup iterations before local SGD is applied, + # because when `post_local_gradient_allreduce` is disabled, the gradients will not be synchronized at all. + # Note that in practice a model averager has to be applied to run model averaging, + # so local gradient averaging is not necessary. + start_localSGD_iter = 10 + state = post_localSGD.PostLocalSGDState( + process_group=None, + subgroup=dist.group.WORLD, + start_localSGD_iter=start_localSGD_iter, + post_local_gradient_allreduce=False, + ) + self._test_ddp_hook_parity( + state=state, + hook=post_localSGD.post_localSGD_hook, + num_validated_iters=start_localSGD_iter, + ) + + # When `subgroup` is None, it is equivalent to the subgroup on the each node. 
+ # For this single-node test environment, the intra-node process group is equivalent to + # the global process group. + if self.world_size == dist.get_world_size(): + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=None, start_localSGD_iter=10 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + + # Since we start local SGD later than the total number of 100 iterations, + # no local SGD actually is executed, and we don't even need to provide a subgroup for this case. + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=None, start_localSGD_iter=1000 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + + def _prepare_single_device_module( + self, + rank, + process_group, + devices, + device_ids, + global_batch_size, + gradient_as_bucket_view=False, + ): + model = Net() + device = devices[0] if devices else torch.device("cuda:%d" % rank) + ddp_model = DistributedDataParallel( + copy.deepcopy(model).to(device), + device_ids=device_ids, + process_group=process_group, + bucket_cap_mb=0.001, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + + model.to(device) + + input = torch.randn(global_batch_size, 2).to(device) + target = torch.randn(global_batch_size, 4).to(device) + + return model, ddp_model, input, target + + def _prepare_cpu_module( + self, + process_group, + global_batch_size, + gradient_as_bucket_view=False, + ): + model = Net() + ddp_model = DistributedDataParallel( + copy.deepcopy(model), + process_group=process_group, + bucket_cap_mb=0.001, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + input = torch.randn(global_batch_size, 2) + target = torch.randn(global_batch_size, 4) + return model, ddp_model, input, target + + def _test_accumulate_gradients_no_sync( + self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False + ): + """ + This is the recommended way to implement accumulate grads. + If ``ddp_comm_hook`` input was specified, it will also register that hook + to the ``ddp_model``. The hook fed into this function should not change + the resulting gradients. + """ + group, group_id, rank = self._init_global_test() + world_size = get_world_size() + + # FIXME: Add testing for gloo/CUDA + if BACKEND == "mpi" or BACKEND == "gloo": + global_batch_size = world_size + local_batch_size = 1 + model, ddp_model, input, target = self._prepare_cpu_module( + group_id, global_batch_size, gradient_as_bucket_view + ) + + if BACKEND == "nccl": + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + int_devices = rank_to_GPU[rank][:1] + devices = [torch.device("cuda:" + str(i)) for i in int_devices] + global_batch_size = world_size + local_batch_size = len(devices) + model, ddp_model, input, target = self._prepare_single_device_module( + rank, + group_id, + devices, + devices, + global_batch_size, + gradient_as_bucket_view, + ) + + if ddp_comm_hook is not None: + ddp_model.register_comm_hook(group_id, ddp_comm_hook) + + def step_model(model, input, target): + model.train() + output = model(input) + loss = F.mse_loss(output, target.to(output.device)) + loss.backward() + + # ensure accumulate grads works with no_grad => no grads are accumulated. 
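+        # Gradient-accumulation pattern exercised below (sketch, names
+        # illustrative only):
+        #     with ddp_model.no_sync():
+        #         loss_a.backward()   # grads accumulate locally, no allreduce
+        #     loss_b.backward()       # first backward outside no_sync syncs grads
+        #     optimizer.step()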
+ with torch.no_grad(): + with ddp_model.no_sync(): + ddp_model.train() + ddp_model(input) + + # check two model parameters over num_iters iterations + for iteration in range(num_iters): + step_model(model, input, target) + + ddp_input = input[ + rank * local_batch_size : (rank + 1) * local_batch_size + ] + ddp_target = target[ + rank * local_batch_size : (rank + 1) * local_batch_size + ] + + if iteration % 2 == 0: + # accumulate grads locally + with ddp_model.no_sync(): + step_model(ddp_model, ddp_input, ddp_target) + else: + # sync grads + step_model(ddp_model, ddp_input, ddp_target) + + for i, j in zip(model.parameters(), ddp_model.parameters()): + if not i.requires_grad: + continue + if iteration % 2 == 0: + self.assertNotEqual(i.grad, j.grad) + else: + self.assertEqual(i.grad, j.grad) + + # Shuffle the input so that DDP input is different + torch.manual_seed(1337 + iteration) + input = input[torch.randperm(global_batch_size)] + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync(self): + """ + Runs _test_accumulate_gradients_no_sync using default inputs + """ + self._test_accumulate_gradients_no_sync() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_grad_is_view(self): + """ + Runs _test_accumulate_gradients_no_sync using default inputs + """ + self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_allreduce_hook(self): + """ + Runs multiple iterations on _test_accumulate_gradients_no_sync + using allreduce hook and validates whether future result was properly + passed as gradients in reducer. + """ + + world_size = get_world_size() + + def allreduce_hook( + group_id: object, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + tensors = [bucket.buffer() / world_size] + return ( + group_id.allreduce(tensors) + .get_future() + .then(lambda fut: fut.value()[0]) + ) + + self._test_accumulate_gradients_no_sync( + num_iters=4, ddp_comm_hook=allreduce_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self): + """ + Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce + hook that also uses then callbacks. In first then callback result is multiplied + by 2, and the second callback divides the result by 2 * world_size. It validates + whether final result was properly passed as gradients in reducer. + """ + + world_size = get_world_size() + + def allreduce_with_then_hook( + group_id: object, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + fut = group_id.allreduce([bucket.buffer()]).get_future() + + def mult(fut): + # Multiply the result by 2. + return 2 * fut.wait()[0] + + def div(fut): + # Divide the result by 2 * world_size. 
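+                # (Net effect of mult followed by div: 2x / (2 * world_size)
+                # == x / world_size, i.e. an ordinary gradient average.)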
+ return fut.wait() / (2 * world_size) + + return fut.then(mult).then(div) + + self._test_accumulate_gradients_no_sync( + num_iters=4, ddp_comm_hook=allreduce_with_then_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_get_future(self): + def mult(fut): + return [t * 3 for t in fut.wait()] + + def add(fut): + return [t + 1 for t in fut.wait()] + + group, group_id, rank = self._init_global_test() + input = _build_tensor(3, 2) + if BACKEND == "nccl": + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + input = input.to(device_id) + fut = group_id.allreduce([input]).get_future() + res = fut.then(mult).then(add).wait() + expected = _build_tensor(3, 2 * len(group) * 3 + 1) + + self.assertEqual(res[0], expected) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + gpus = list(rank_to_GPU[rank]) + + for use_bucket_view, static_graph in itertools.product( + (False, True), (False, True) + ): + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + # test set static graph twice + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + set_static_graph_twice=True, + ) + + # test output_device + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + output_device=torch.device("cuda"), + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + # test device_ids + gpus_list = [torch.device("cuda:" + str(i)) for i in gpus] + self._test_DistributedDataParallel( + gpu_subset=gpus_list, + rank=rank, + output_device=torch.device("cuda"), + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + def _test_DistributedDataParallel_with_amp(self, grad_is_view=False): + torch.manual_seed(31415) + # Creates model and optimizer in default precision + model = copy.deepcopy(DDP_NET).cuda() + optimizer = torch.optim.SGD(model.parameters(), lr=0.03) + + # Creates a GradScaler once at the beginning of training. + scaler = GradScaler() + + ddp_model = nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + # verify grads are none before training + for p in ddp_model.parameters(): + self.assertTrue(p is not None) + self.assertTrue(p.grad is None) + + for idx in range(20): + optimizer.zero_grad() + # Runs the forward pass with autocasting. + with autocast(): + output = ddp_model(input) + loss = loss_fn(output, target) + + # Scales loss. Calls backward() on scaled loss to create scaled gradients. + # Backward passes under autocast are not recommended. + # Backward ops run in the same dtype autocast chose for corresponding forward ops. 
+ scaler.scale(loss).backward() + + # verify grads are not none and are valid during training + for p in ddp_model.parameters(): + if p.requires_grad: + self.assertTrue(p.grad is not None) + self.assertFalse(p.grad.isnan().any()) + self.assertFalse(p.grad.isinf().any()) + + # scaler.step() first unscales the gradients of the optimizer's assigned params. + # If these gradients do not contain infs or NaNs, optimizer.step() is then called, + # otherwise, optimizer.step() is skipped. + scaler.step(optimizer) + + # Updates the scale for next iteration. + scaler.update() + + # Shuffle the input so that DDP input is different + torch.manual_seed(1337 + idx) + input = input[torch.randperm(dist.get_world_size() * 2)] + + return ddp_model + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_with_amp_and_grad_is_view(self): + torch.cuda.set_device(self.rank) + ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp( + grad_is_view=False + ) + ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp( + grad_is_view=True + ) + for i, j in zip( + ddp_model_grad_not_view.parameters(), + ddp_model_grad_is_view.parameters(), + ): + self.assertEqual(i, j) + + def _test_DistributedDataParallel_SyncBatchNorm( + self, + gpu_subset, + rank, + local_bs, + global_bs, + offset, + output_device=None, + affine=True, + ): + # Run a simple end to end DDP model, use result of single node model + # as baseline + + # cpu training setup + model = BN_NET if affine else BN_NET_NO_AFFINE + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpu_subset[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpu_subset[0]) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, device_ids=gpu_subset + ) + + # test serializable/unserializable + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + model_DDP = torch.load(tmp) + else: + torch.save(model_DDP, tmp.name) + model_DDP = torch.load(tmp.name) + + # data initialization + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 4) + loss = nn.MSELoss() + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpu_subset[0]), + target.cuda(gpu_subset[0]), + loss, + local_bs, + rank, + global_bs, + True, + offset, + dist.get_world_size(), + 5 if affine else 2, + ) + self._barrier() + + def _test_post_localSGD_optimizer_parity(self, create_averager, grad_is_view): + learning_rate = 0.03 + + net = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_is_view, + ) + averager = create_averager() + opt = torch.optim.SGD(net.parameters(), lr=learning_rate) + + net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_is_view, + ) + # Process group cannot be pickled in some environments, + # so cannot deep copy an averager. 
See: + # https://github.com/pytorch/pytorch/pull/74737#pullrequestreview-922487496 + averager2 = create_averager() + post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager2 + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + for _ in range(20): + self._perform_a_train_step(opt, net, loss_fn, input, target) + averager.average_parameters(net.parameters()) + + self._perform_a_train_step( + post_localSGD_opt, + net_using_post_localSGD_opt, + loss_fn, + input, + target, + ) + for p1, p2 in zip( + net.parameters(), net_using_post_localSGD_opt.parameters() + ): + self.assertEqual(p1.data, p2.data) + + # Also check if the built-in step counters are the same to prevent a bug like #74737. + self.assertEqual(averager.step, averager2.step) + + def _create_periodic_model_averager(self): + return averagers.PeriodicModelAverager(period=4, warmup_steps=10) + + def _create_post_localSGD_optimizer(self, net, learning_rate, averager): + return post_localSGD_optimizer.PostLocalSGDOptimizer( + optim=torch.optim.SGD(net.parameters(), lr=learning_rate), + averager=averager, + ) + + def _perform_a_train_step(self, optimizer, net, loss_fn, input, target): + optimizer.zero_grad() + output = net(input) + loss = loss_fn(output, target) + loss.backward() + optimizer.step() + + def _test_post_localSGD_optimizer_step_reload( + self, create_averager, chkpt_file + ): + learning_rate = 0.03 + + net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), device_ids=[self.rank] + ) + + averager = create_averager() + post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager + ) + + averager2 = create_averager() + dummy_post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager2 + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + for _ in range(20): + self._perform_a_train_step( + post_localSGD_opt, + net_using_post_localSGD_opt, + loss_fn, + input, + target, + ) + + if self.rank == 0: + torch.save( + {"optimizer_state_dict": post_localSGD_opt.state_dict()}, chkpt_file + ) + + dist.barrier() + map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank} + checkpoint = torch.load(chkpt_file, map_location=map_location) + dummy_post_localSGD_opt.load_state_dict(checkpoint["optimizer_state_dict"]) + + # Check that we didn't hit the trivial case + self.assertNotEqual(averager2.step, 0) + # Check if dummy averager was initialized to a correct value + self.assertEqual(averager.step, averager2.step) + + # Remove 'step' entry from a checkpoint. + # And make sure it is not in the state dictionary + del checkpoint["optimizer_state_dict"]["step"] + self.assertNotIn("step", checkpoint["optimizer_state_dict"]) + + # Check if checkpoint without a 'step' entry invokes a warning + with self.assertWarnsRegex( + expected_warning=UserWarning, + expected_regex="Loaded state dict does not contain a step counter for an averager. 
" + "Setting step counter to 0.", + ): + dummy_post_localSGD_opt.load_state_dict( + checkpoint["optimizer_state_dict"] + ) + + self.assertEqual(averager2.step, 0) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_periodic_model_averager, + grad_is_view=False, + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_grad_is_view(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_periodic_model_averager, + grad_is_view=True, + ) + + def _create_hierarchical_model_averager(self): + period_group_size_dict = OrderedDict([(2, 2), (4, dist.get_world_size())]) + return hierarchicalSGD.HierarchicalModelAverager( + period_group_size_dict=period_group_size_dict, warmup_steps=4 + ) + + @skip_if_lt_x_gpu(4) + @skip_if_odd_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_with_hierarchical_sgd(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_hierarchical_model_averager, + grad_is_view=False, + ) + + @skip_if_lt_x_gpu(4) + @skip_if_odd_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_with_hierarchical_sgd_grad_is_view( + self, + ): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_hierarchical_model_averager, + grad_is_view=True, + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_step_reload(self): + torch.cuda.set_device(self.rank) + with _rank_temp_file() as tmp_file: + self._test_post_localSGD_optimizer_step_reload( + self._create_periodic_model_averager, tmp_file + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self): + self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + torch.channels_last + ) + self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + torch.channels_last_3d + ) + + def _test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + self, memory_format + ): + group, group_id, rank = self._init_global_test() + num_processes = dist.get_world_size() + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(num_processes * 2) + + model = ONLY_SBN_NET + model_gpu = copy.deepcopy(model).cuda(rank) + model_DDP = nn.parallel.DistributedDataParallel( + model_gpu, device_ids=[rank] + ) + + shapes = [global_bs, 2, 4, 4] + ( + [] if memory_format is torch.channels_last else [4] + ) + + input_gpu = ( + torch.randn(*shapes, dtype=torch.float) + .cuda(rank) + 
.to(memory_format=memory_format) + ) + target_gpu = ( + torch.randn(*shapes, dtype=torch.float) + .cuda(rank) + .to(memory_format=memory_format) + ) + loss = nn.MSELoss() + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_gpu, + target_gpu, + loss, + local_bs, + rank, + global_bs, + True, + bs_offset, + dist.get_world_size(), + memory_format=memory_format, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(world_size * 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + ) + + # test output_device + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + output_device=torch.device("cuda"), + ) + + # test device_ids + gpus = [torch.device("cuda:" + str(i)) for i in gpus] + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + output_device=torch.device("cuda"), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(world_size * 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + affine=False, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self): + group, group_id, rank = self._init_global_test() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + model = nn.BatchNorm1d(2) + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpus[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpus[0]) + model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) + + local_bs = len(gpus) * 2 + global_bs = dist.get_world_size() * local_bs + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 2) + loss = nn.MSELoss() + + # disabling cudnn. + # SyncBatchNorm goes through native_batch_norm kernel, this avoids the + # numerical issue created by the divergent code path. 
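+        # (Illustrative note: torch.backends.cudnn.flags(False) is a context
+        # manager that roughly amounts to temporarily setting
+        # torch.backends.cudnn.enabled = False and restoring the previous
+        # flags on exit, so batch norm below runs the native kernel.)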
+ with torch.backends.cudnn.flags(False): + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpus[0]), + target.cuda(gpus[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + @require_world_size(2) + def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self): + group, group_id, rank = self._init_global_test() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + model = nn.BatchNorm1d(2) + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpus[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpus[0]) + model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) + + local_bs = 1 + global_bs = dist.get_world_size() + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 2) + loss = nn.MSELoss() + + # disabling cudnn. + # SyncBatchNorm goes through native_batch_norm kernel, this avoids the + # numerical issue created by the divergent code path. + with torch.backends.cudnn.flags(False): + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpus[0]), + target.cuda(gpus[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value( + self, + ): + group, group_id, rank = self._init_global_test() + model = nn.parallel.DistributedDataParallel( + ONLY_SBN_NET.cuda(rank), device_ids=[rank] + ) + + input_var = [] + for i in range(dist.get_world_size()): + input_var_rank = torch.cat( + [ + torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)), + torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)), + ], + dim=1, + ) + input_var.append(input_var_rank) + + all_input_var = torch.cat( + [ + x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1) + for x in input_var + ], + dim=1, + ).cuda(rank) + + for i in range(100): + y = model(input_var[rank].cuda(rank)) + y.mean().backward() + + running_mean, running_var = ( + model.module.running_mean, + model.module.running_var, + ) + torch.testing.assert_close(running_mean, all_input_var.mean(1)) + torch.testing.assert_close(running_var, all_input_var.var(1)) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self): + group, group_id, rank = self._init_global_test() + # only do single GPU per process + gpus = [rank] + + # cpu training setup + model = BN_NET + + num_processes = dist.get_world_size() + local_bs = rank + 2 + bs_offset = int((rank + 3) * rank / 2) + global_bs = int((num_processes + 3) * num_processes / 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND 
not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_half(self): + group, group_id, rank = self._init_global_test() + + model = copy.deepcopy(BN_NET) + model = model.half() + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = nn.parallel.DistributedDataParallel(model.cuda(rank), device_ids=[rank]) + inp = torch.randn(2, 2, dtype=torch.float16, device=torch.device(rank)) + # Check that forward/backward do not error with dtype mismatch + out = model(inp) + self.assertEqual(out.dtype, torch.float16) + out.sum().backward() + for param in model.parameters(): + self.assertEqual(param.grad.dtype, torch.float16) + + def _test_ddp_logging_data(self, is_gpu): + rank = dist.get_rank() + model_DDP = copy.deepcopy(DDP_NET) + if is_gpu: + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP.cuda(rank), device_ids=[rank] + ) + else: + model_DDP = nn.parallel.DistributedDataParallel(model_DDP) + + # dummy data initialization + local_bs = 2 + batch_size, input, target, loss = self._prepare_dummy_data(local_bs) + if is_gpu: + input = input.cuda(rank) + target = target.cuda(rank) + + model_DDP._set_ddp_runtime_logging_sample_rate(2) + + for idx in range(20): + offset = rank * local_bs + + # DDP training, DDP scatters subsets of input to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + 1, + ) + + self._model_step_with_zero_grad(model_DDP) + + # Verify DDP logging data is sampled as expected + # If it has ran more than 10 iterations and this is + # the sampled iteration for measuring run time stats, + # the run time stats for this idx-th iteration will not + # be zeros. + ddp_logging_data = model_DDP._get_ddp_logging_data() + if idx > 0 and (idx < 10 or idx % 2 == 0): + self.assertGreaterEqual( + ddp_logging_data.get("forward_compute_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_compute_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_comm_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_compute_time"), + ddp_logging_data.get("backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_comm_time"), + ddp_logging_data.get("backward_compute_comm_overlap_time"), + ) + self.assertEqual(ddp_logging_data.get("iteration"), idx) + elif idx > 0: + # if the idx-th iteration is not sampled to set runtime stats, + # ddp_logging_data.iteration will not be updated to current + # iteration. 
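+                # For example, with the sample rate of 2 set above, runtime
+                # stats are collected for every iteration below 10 and for
+                # every even iteration afterwards, so at idx == 11 the logged
+                # "iteration" still holds the value recorded at idx == 10.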
+ self.assertNotEqual(ddp_logging_data.get("iteration"), idx) + + # Shuffle the input so that DDP input is different + input = input[torch.randperm(batch_size)] + + return model_DDP + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_ddp_logging_data_cpu(self): + def parse_env(var): + return os.environ[var] if var in os.environ else "N/A" + + dist.set_debug_level(dist.DebugLevel.INFO) + group, group_id, rank = self._init_global_test() + model_DDP = self._test_ddp_logging_data(is_gpu=False) + + ddp_logging_data = model_DDP._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("world_size"), dist.get_world_size()) + self.assertEqual(ddp_logging_data.get("rank"), dist.get_rank()) + self.assertEqual(ddp_logging_data.get("module_name"), "Net") + self.assertEqual(ddp_logging_data.get("device_ids"), "") + # output_device is -1 in default if it is not set, e.g. + # output_device of CPU training is -1. + self.assertEqual(ddp_logging_data.get("output_device"), -1) + self.assertEqual(ddp_logging_data.get("broadcast_buffers"), 1) + self.assertEqual(ddp_logging_data.get("bucket_cap_bytes"), 25 * 1024 * 1024) + self.assertEqual(ddp_logging_data.get("find_unused_parameters"), 0) + self.assertEqual(ddp_logging_data.get("gradient_as_bucket_view"), 0) + self.assertEqual( + ddp_logging_data.get("backend_name"), dist.get_backend(group_id) + ) + self.assertEqual(ddp_logging_data.get("iteration"), 18) + params = list(model_DDP.parameters()) + num_params = 0 + param_size = 0 + params = list(filter(lambda parameter: parameter.requires_grad, params)) + for p in params: + num_params += 1 + param_size += p.numel() * p.element_size() + self.assertEqual(ddp_logging_data.get("dtypes"), "float") + self.assertEqual( + ddp_logging_data.get("total_parameter_size_bytes"), param_size + ) + self.assertEqual(ddp_logging_data.get("num_parameter_tensors"), num_params) + self.assertEqual(ddp_logging_data.get("bucket_sizes"), str(param_size)) + self.assertEqual( + ddp_logging_data.get("master_port"), parse_env("MASTER_PORT") + ) + self.assertEqual( + ddp_logging_data.get("master_addr"), parse_env("MASTER_ADDR") + ) + self.assertEqual( + ddp_logging_data.get("torch_distributed_debug"), + parse_env("TORCH_DISTRIBUTED_DEBUG"), + ) + self.assertEqual( + ddp_logging_data.get("cuda_visible_devices"), + parse_env("CUDA_VISIBLE_DEVICES"), + ) + if ddp_logging_data.get("backend_name") == "gloo": + self.assertEqual( + ddp_logging_data.get("gloo_socket_ifname"), + parse_env("GLOO_SOCKET_IFNAME"), + ) + self.assertEqual( + ddp_logging_data.get("gloo_device_transport"), + parse_env("GLOO_DEVICE_TRANSPORT"), + ) + default_gloo_threads = 2 + self.assertEqual( + ddp_logging_data.get("gloo_num_threads"), + default_gloo_threads, + ) + + self.assertEqual(ddp_logging_data.get("nccl_socket_ifname"), None) + self.assertEqual(ddp_logging_data.get("nccl_blocking_wait"), None) + self.assertEqual(ddp_logging_data.get("nccl_async_error_handling"), None) + self.assertEqual(ddp_logging_data.get("nccl_debug"), None) + self.assertEqual(ddp_logging_data.get("nccl_nthreads"), None) + self.assertEqual(ddp_logging_data.get("nccl_ib_timeout"), None) + # test runtime logging fields + # Note: DETAIL debug mode logs DDP logging data to stdout and + # thus accesses std::map, which fills in a default value for the + # type if it didn't exist. 
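+        # That is also why the check below reads the field with a default of
+        # 0 rather than asserting that the key is absent: under DETAIL the
+        # entry may already exist with a default-constructed value of 0.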
+ self.assertEqual(ddp_logging_data.get("unused_parameter_size", 0), 0) + self.assertEqual(ddp_logging_data.get("has_rebuilt_buckets"), 1) + self.assertEqual( + ddp_logging_data.get("rebuilt_bucket_sizes"), str(param_size) + ) + grad_ready_order = ddp_logging_data.get( + "prev_iteration_grad_ready_order_indices" + ) + expected_order = list(reversed([str(x) for x in range(3)])) + self.assertEqual(grad_ready_order, ", ".join(expected_order)) + bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices") + self.assertEqual(bucket_indices, " ".join(expected_order)) + # It is hard to test accurate latency, but it can test whether the latency is + # a valid value and in the expected range. + self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), 1 + ) + self.assertGreaterEqual(ddp_logging_data.get("avg_backward_comm_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_comm_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + # Test host-side times are roughly in the order that we expect + fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") + bwd_comp_start_host_side_time = ddp_logging_data.get( + "backward_compute_time_start" + ) + bwd_comp_end_host_side_time = ddp_logging_data.get( + "backward_compute_time_end" + ) + bwd_comm_start_host_side_time = ddp_logging_data.get( + "backward_comm_time_start" + ) + bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") + self.assertGreaterEqual( + bwd_comm_end_host_side_time, bwd_comm_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comm_start_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comp_end_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) + + # test larger net with mixed data types, verify multiple bucket sizes + model = LargeNet() + model.float() + model.fc1.double() + model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5) + ddp_logging_data = model_DDP._get_ddp_logging_data() + params = list(model_DDP.parameters()) + self.assertEqual( + ddp_logging_data.get("bucket_cap_bytes"), int(1.5 * 1024 * 1024) + ) + bucket_sizes = [ + params[1].numel() * params[1].element_size(), + params[0].numel() * params[0].element_size(), + ] + self.assertEqual( + ddp_logging_data.get("bucket_sizes"), + ", ".join(str(x) for x in bucket_sizes), + ) + self.assertEqual(ddp_logging_data.get("dtypes"), "double, float") + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_ddp_logging_data_gpu(self): + group, group_id, rank = self._init_global_test() + model_DDP = self._test_ddp_logging_data(is_gpu=True) + ddp_logging_data = model_DDP._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("device_ids"), str(rank)) + self.assertEqual(ddp_logging_data.get("output_device"), rank) + grad_ready_order = ddp_logging_data.get( + "prev_iteration_grad_ready_order_indices" + ) + expected_order = list(reversed([str(x) for x in range(3)])) + self.assertEqual(grad_ready_order, ", ".join(expected_order)) + bucket_indices = 
ddp_logging_data.get("rebuilt_per_bucket_param_indices") + self.assertEqual(bucket_indices, " ".join(expected_order)) + # test runtime logging fields + # It is hard to test accurate latency, but it can test whether the latency is + # a valid value and in the expected range. + self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_comm_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + # Test host-side times are roughly in the order that we expect + fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") + bwd_comp_start_host_side_time = ddp_logging_data.get( + "backward_compute_time_start" + ) + bwd_comp_end_host_side_time = ddp_logging_data.get( + "backward_compute_time_end" + ) + bwd_comm_start_host_side_time = ddp_logging_data.get( + "backward_comm_time_start" + ) + bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") + self.assertGreaterEqual( + bwd_comm_end_host_side_time, bwd_comm_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comm_start_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comp_end_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_static_graph_api_cpu(self): + model_DDP = nn.parallel.DistributedDataParallel(DDP_NET) + expected_err = "should be called before training loop starts" + with self.assertRaisesRegex(RuntimeError, expected_err): + local_bs = 2 + batch_size, input, target, loss = self._prepare_dummy_data(local_bs) + offset = dist.get_rank() * local_bs + + # DDP training, DDP scatters subsets of input to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + 1, + ) + model_DDP._set_static_graph() + + # Verify error was logged in ddp_logging_data. + verify_ddp_error_logged(model_DDP, expected_err) + + @skipIfNoTorchVision + def test_SyncBatchNorm_process_group(self): + # When adopting `convert_sync_batchnorm` to convert a `nn.modules`, + # it need to recursively pass the `process_group` in the module when the `SyncBatchNorm` + # is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models). + + process_ids = 0 + process_group = torch.distributed.new_group([process_ids]) + res50_model = torchvision.models.resnet50() + res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm( + copy.deepcopy(res50_model), process_group + ) + process_group_sync = res50_model_sync.layer1[0].bn1.process_group + self.assertEqual(process_group_sync, process_group) + + def _run_reduction_test( + self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None + ): + if reduction_fn != dist.all_reduce and dst is None: + raise ValueError(f"Reduction fn {reduction_fn} must specify dst!") + if dst is not None: + reduction_fn(tensor, dst, op) + # Only destination rank tensor is expected to have final result. 
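+            # (Non-destination ranks make no assertion here, since their
+            # buffers are not guaranteed to hold the reduced value.)
+            # Illustrative call:
+            #   self._run_reduction_test(
+            #       t, expected, dist.ReduceOp.SUM, dist.reduce, dst=0)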
+ if dist.get_rank() == dst: + self.assertEqual(tensor, expected_tensor) + else: + reduction_fn(tensor, op) + self.assertEqual(tensor, expected_tensor) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_allreduce(self): + torch.cuda.set_device(self.rank) + # Run all_reduce with PRODUCT + element = self.rank % 2 == 0 + for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: + input_tensor = torch.tensor([element, element]).to(self.rank) + self._run_reduction_test( + input_tensor, torch.tensor([False, False]).to(self.rank), op + ) + # Ensure that all ranks contributing True (cast to 1) results in the + # correct reduction. + input_tensor = torch.tensor([True, True]).to(self.rank) + expected_tensor = input_tensor.clone() + self._run_reduction_test(input_tensor, expected_tensor, op) + + # Run all_reduce with SUM + for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: + input_tensor = torch.tensor([element, element]).to(self.rank) + self._run_reduction_test( + input_tensor, torch.tensor([True, True]).to(self.rank), op + ) + # TODO: NCCL backend does not work correctly for bitwise reduction ops + # (see https://github.com/pytorch/pytorch/issues/41362). Add tests for + # these once it is supported. + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_allgather(self): + torch.cuda.set_device(self.rank) + inp = {0: [True, True], 1: [False, True]} + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + # Preserve a copy of the tensor to compare against after allgather. + input_tensor_copy = input_tensor.clone() + tensor_list = [ + torch.tensor([False, False]).to(self.rank) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, input_tensor) + + self.assertEqual(len(tensor_list), dist.get_world_size()) + for i, t in enumerate(tensor_list): + expected = torch.tensor(inp[i % 2]).to(self.rank) + self.assertEqual(t, expected) + # Ensure that the input tensor is not modified, since this collective + # does not modify its input. + self.assertEqual(input_tensor_copy, input_tensor) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_nccl_backend_bool_reduce(self): + torch.cuda.set_device(self.rank) + inp = {0: [True, True], 1: [False, False]} + # Run reduce() with product op + for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + expected = torch.tensor([False, False]).to(self.rank) + self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) + # Ensure that all ranks contributing True (cast to 1) results in the + # correct reduction. 
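+            # e.g. PRODUCT / MIN over all-True inputs stays True:
+            # 1 * 1 * ... * 1 == 1 and min(1, ..., 1) == 1, so the destination
+            # rank should see [True, True] after the reduce below.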
+ input_tensor = torch.tensor([True, True]).to(self.rank) + expected_tensor = input_tensor.clone() + self._run_reduction_test( + input_tensor, expected_tensor, op, dist.reduce, dst=0 + ) + + for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + expected = ( + torch.tensor([True, True]).to(self.rank) + if self.rank == 0 + else input_tensor.clone() + ) + self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_broadcast(self): + tensor_size = 10 + bcast_tensor = torch.tensor( + [ + (random.random() < 0.5 if self.rank == 0 else False) + for _ in range(tensor_size) + ] + ).to(self.rank) + dist.broadcast(bcast_tensor, src=0) + # Now allgather and ensure the tensors are equal. + tensor_list = [ + torch.tensor([False for _ in range(tensor_size)]).to(self.rank) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, bcast_tensor) + expected = tensor_list[0] + for tensor in tensor_list[1:]: + self.assertEqual(tensor, expected) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_DistributedSampler_padding(self): + # Tests padding of distributed sampler. + world_size = dist.get_world_size() + + # Simulates the 'casual' dataset size + dataset_size = 100 + world_size + 1 + dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)] + + # Simulates the 'tiny' dataset size + dataset_tiny_size = max(world_size // 2 - 1, 1) + dataset_tiny = [ + torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size) + ] + + # Specifying drop_last=True will cause the tail of the data to be dropped. + dist_sampler = DistributedSampler(dataset=dataset, drop_last=True) + local_num_samples, local_dataset_size = ( + dist_sampler.num_samples, + dist_sampler.total_size, + ) + # The effective dataset size should be the greatest integer that is <= + # dataset_size that is divisible by the world_size. This is to ensure each + # rank processes the same number of samples. + effective_dataset_size = ( + math.ceil((dataset_size - world_size) / world_size) + if dataset_size % world_size != 0 + else dataset_size / world_size + ) + self.assertEqual(local_num_samples, effective_dataset_size) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler)) + self.assertEqual(len(indices_list), local_num_samples) + + def validate_global_samples(local_num_samples): + # Ensure that each rank processes the same number of samples. + world_samples = [ + torch.LongTensor([0]).to(self.rank) for _ in range(world_size) + ] + dist.all_gather( + world_samples, torch.tensor([local_num_samples]).to(self.rank) + ) + world_samples = [sample.item() for sample in world_samples] + self.assertEqual(len(set(world_samples)), 1) + + validate_global_samples(local_num_samples) + + # drop_last=False is the default and will add additional indices to be sampled, + # increasing the effective dataset size. + dist_sampler_added_samples = DistributedSampler(dataset=dataset) + local_num_samples, local_dataset_size = ( + dist_sampler_added_samples.num_samples, + dist_sampler_added_samples.total_size, + ) + # The effective dataset size is the smallest integer that is >= dataset_size + # and divisible by the world size. 
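+        # For example, with a world size of 2 the dataset_size above is
+        # 100 + 2 + 1 == 103, so num_samples == ceil(103 / 2) == 52 and
+        # total_size == 52 * 2 == 104; the single extra index is padding
+        # that repeats an existing sample.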
+ self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size)) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler_added_samples)) + self.assertEqual(len(indices_list), local_num_samples) + + # Ensure that each rank processes the same number of samples. + validate_global_samples(local_num_samples) + + # Ensure additional samples are padded even when + # the extremely small dataset is given. + dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny) + local_num_samples, local_dataset_size = ( + dist_sampler_added_samples_tiny.num_samples, + dist_sampler_added_samples_tiny.total_size, + ) + self.assertEqual( + local_num_samples, math.ceil(dataset_tiny_size / world_size) + ) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler_added_samples_tiny)) + self.assertEqual(len(indices_list), local_num_samples) + validate_global_samples(local_num_samples) + + def _test_allgather_object(self, subgroup=None): + # Only set device for NCCL backend since it must use GPUs. + + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + + backend = os.environ["BACKEND"] + if backend == "nccl": + # Case where rank != GPU device. + next_rank = (self.rank + 1) % int(self.world_size) + torch.cuda.set_device(next_rank) + + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=0))) + + output_gathered = [None for _ in range(dist.get_world_size())] + dist.all_gather_object( + output_gathered, + gather_objects[self.rank % len(gather_objects)], + group=subgroup, + ) + + for i, val in enumerate(output_gathered): + expected = gather_objects[i % len(gather_objects)] + self.assertEqual(val, expected) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + def test_all_gather_object_default_pg(self): + return self._test_allgather_object() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_all_gather_object_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_allgather_object(subgroup=subgroup) + + def _test_gather_object(self, pg=None): + # Ensure stateful objects can be gathered + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + my_rank = dist.get_rank(pg) + + backend = os.environ["BACKEND"] + if backend == "nccl": + # Case where rank != GPU device. 
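+            # e.g. with a world size of 2, rank 0 uses cuda:1 and rank 1 uses
+            # cuda:0, exercising the rank != device case.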
+ next_rank = (self.rank + 1) % int(self.world_size) + torch.cuda.set_device(next_rank) + + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=my_rank))) + + output_gathered = [None for _ in range(dist.get_world_size(pg))] + gather_on_rank = 0 + dist.gather_object( + gather_objects[self.rank % len(gather_objects)], + object_gather_list=output_gathered + if my_rank == gather_on_rank + else None, + dst=gather_on_rank, + group=pg, + ) + if my_rank != gather_on_rank: + self.assertEqual( + output_gathered, [None for _ in range(dist.get_world_size())] + ) + else: + for i, val in enumerate(output_gathered): + expected = gather_objects[i % len(gather_objects)] + self.assertEqual(val, expected) + + # Validate errors when objects can't be pickled. + class Bar: + pass + + b = Bar() + gather_objects = [b for _ in range(dist.get_world_size())] + with self.assertRaisesRegex(AttributeError, "Can't pickle local object"): + dist.all_gather_object( + [None for _ in range(dist.get_world_size())], + gather_objects[self.rank], + group=pg, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_gather_object(self): + return self._test_gather_object() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_gather_object_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_gather_object(subgroup) + + def validate_net_equivalence(self, net): + # Helper to validate synchronization of nets across ranks. + net_module_states = list(net.module.state_dict().values()) + # Check that all tensors in module's state_dict() are equal. + for t in net_module_states: + tensor_list = [ + torch.zeros_like(t) for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, t) + for tensor in tensor_list: + self.assertEqual(tensor, t) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_sync_module_states(self): + # Test that after calling _sync_module_states, models across ranks + # are the same and are equal to the model on the input rank. + dim = 2 + rank = self.rank + rank_to_broadcast = 1 + # Seed to ensure that ranks are initialized with different initial models. + torch.manual_seed(rank) + model = nn.Linear(dim, dim, bias=False) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 + ) + new_model = nn.Linear(dim, dim, bias=False).cuda(rank) + net.module = copy.deepcopy(new_model) + # Assert params are different + net_module_states = list(net.module.state_dict().values()) + for t in net_module_states: + tensor_list = [ + torch.zeros_like(t) for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, t) + for i, tensor in enumerate(tensor_list): + if i == rank: + self.assertEqual(t, tensor) + else: + # tensor from another rank should be different. 
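+                        # (Each rank seeded its RNG with its own rank above,
+                        # so the pre-sync parameters are expected to differ.)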
+ self.assertNotEqual(t, tensor) + + _sync_module_states( + module=net.module, + process_group=net.process_group, + broadcast_bucket_size=net.broadcast_bucket_size, + src=rank_to_broadcast, + params_and_buffers_to_ignore=net.parameters_to_ignore, + ) + # Now all model params should be the same. + self.validate_net_equivalence(net) + # Since the network params were broadcast from rank_to_broadcast, validate that + # they are the same as new_model on rank_to_broadcast. + if rank == rank_to_broadcast: + expected_states = new_model.state_dict().values() + for t, expected in zip(net_module_states, expected_states): + self.assertEqual(t, expected) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_grad_div_uneven_inputs(self): + # Test gradient division during training with join() API. If + # divide_by_initial_world_size=False, we scale by the effective world + # size when allreducing grads. + dim = 5 + batch = 1 + grad_scale = 50 + rank = self.rank + model = nn.Linear(dim, dim, bias=False) + inp = torch.ones(batch, dim, device=self.rank) * grad_scale + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 + ) + n_iters = 3 + if self.rank > 0: + n_iters += 2 + + with net.join(divide_by_initial_world_size=False): + for _ in range(n_iters): + loss = net(inp).sum() + loss.backward() + # The grad is always expected_grad, since we divide by the number + # of currently active processes and inactive processes contribute + # zero gradient. If we kept dividing by static initial world + # size as processes leave, the grad would be smaller. + expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale + param = next(iter(net.parameters())) + self.assertEqual(expected_grad, param.grad) + # Avoid accumulating grads so that it's the same every iteration + net.zero_grad() + torch.cuda.synchronize(device=self.rank) + + # If divide_by_initial_world_size=True (default), we always scale grads + # by the initial world_size. + with net.join(divide_by_initial_world_size=True): + for i in range(n_iters): + loss = net(inp).sum() + loss.backward() + effective_ws = dist.get_world_size() + if i >= 3: + effective_ws -= 1 + expected_grad = ( + torch.ones(dim, dim, device=self.rank) + * grad_scale + * effective_ws + ) / dist.get_world_size() + param = next(iter(net.parameters())) + self.assertEqual(expected_grad, param.grad) + # Avoid accumulating grad so that it's the same every iteration. 
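+                # e.g. with 2 ranks and grad_scale == 50, the expected
+                # per-element grad above is 50 * 2 / 2 == 50 while both ranks
+                # are active and 50 * 1 / 2 == 25 once rank 0 has joined
+                # (i >= 3).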
+ net.zero_grad() + torch.cuda.synchronize(device=self.rank) + + def _test_ddp_profiling(self, profiler_ctx): + batch = 3 + dim = 10 + num_iters = 6 + torch.cuda.set_device(self.rank) + model = nn.Linear(dim, dim, bias=False) + inp = torch.rand(batch, dim, device=self.rank) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + profiler_ctx_copy = copy.deepcopy(profiler_ctx) + + with profiler_ctx as prof: + for i in range(num_iters): + loss = net(inp).sum() + loss.backward() + + all_reduce_event_name = f"{dist.get_backend()}:all_reduce" + events = get_profiling_event(all_reduce_event_name, prof) + event_count = sum(e.count for e in events) + self.assertEqual(event_count, num_iters) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, all_reduce_event_name) + + broadcast_event_name = f"{dist.get_backend()}:broadcast" + broadcast_events = get_profiling_event(broadcast_event_name, prof) + event_count = sum(e.count for e in broadcast_events) + # Broadcast is called during rebuild_buckets + self.assertGreaterEqual(event_count, 1) + for event in broadcast_events: + self.assertEqual(event.name, broadcast_event_name) + + # Run DDP with profiling for a few iterations, then enable profiling + # for a single pass, and ensure it is recorded. This tests that the + # thread local state is correctly updated. + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + for i in range(3): + loss = net(inp).sum() + loss.backward() + # Now enable the profiler. + with profiler_ctx_copy as prof: + loss = net(inp).sum() + loss.backward() + + events = get_profiling_event(all_reduce_event_name, prof) + self.assertGreaterEqual(len(events), 1) + self.assertGreaterEqual(events[0].count, 1) + self.assertEqual(events[0].name, all_reduce_event_name) + for event in events: + self.assertTrue(event.is_async) + # Ensure searching unused parameters was profiled + events = get_profiling_event("search_unused_parameters", prof) + self.assertEqual(len(events), 1) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_profiling_autograd_profiler(self): + autograd_profiler_ctx = torch.autograd.profiler.profile() + return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_ddp_profiling_torch_profiler(self): + cpu_act = torch.profiler.ProfilerActivity.CPU + cuda_act = torch.profiler.ProfilerActivity.CUDA + torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act]) + self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_join_model_equivalence(self): + # Verifies equivalence with model training locally and with DDP under + # the join context manager. 
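+        # The local baseline below runs the summed iteration count at lr,
+        # while each DDP rank runs only its own share at lr * world_size to
+        # offset DDP's gradient averaging; assuming every rank feeds the same
+        # input, the grad of out.sum() for this bias-free Linear does not
+        # depend on the current weights, so the updates are additive and the
+        # two schedules should land on the same parameters.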
+ batch = 3 + dim = 10 + learning_rate = 0.03 + model = nn.Linear(dim, dim, bias=False) + inp = torch.rand(batch, dim, device=self.rank) + local_model = copy.deepcopy(model) + local_model = local_model.cuda(self.rank) + rank_to_iter_mapping = { + rank: 2 * (rank + 1) for rank in range(dist.get_world_size()) + } + # run local model + local_iters = sum(rank_to_iter_mapping.values()) + local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate) + for _ in range(local_iters): + local_optim.zero_grad() + out = local_model(inp) + loss = out.sum() + loss.backward() + local_optim.step() + + # run DDP model with join API + num_iters = rank_to_iter_mapping[self.rank] + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), device_ids=[self.rank] + ) + ddp_optim = torch.optim.SGD( + model.parameters(), lr=learning_rate * dist.get_world_size() + ) + with net.join(): + for i in range(num_iters): + ddp_optim.zero_grad() + out = net(inp) + loss = out.sum() + loss.backward() + torch.cuda.synchronize(device=self.rank) + ddp_optim.step() + + # Validate model state dicts are equal + for (_, local_tensor), (_, dist_tensor) in zip( + local_model.state_dict().items(), net.module.state_dict().items() + ): + self.assertEqual(local_tensor, dist_tensor) + + def _run_uneven_inputs_test( + self, + test_case, + iteration_mapping, + find_unused_params, + ): + model = test_case.model + inp = test_case.inp + rank = self.rank + sync_interval = test_case.sync_interval + torch.cuda.set_device(rank) + # Ensure all outstanding GPU work is completed so this test runs independently. + dist.barrier() + # Bucket_cap_mb is intentionally low to test allreduce scheduling when + # there are many buckets. + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), + device_ids=[rank], + bucket_cap_mb=1, + find_unused_parameters=find_unused_params, + ) + # Register hook if specified + if test_case.hook is not None: + net.register_comm_hook(test_case.state, test_case.hook) + print(f"registered hook {test_case.hook}") + + # Determine num iters for this rank via the passed in mapping. + num_iters = iteration_mapping[rank] + # If we throw when earliest rank terminates, we should ensure + # that we iterate for that minimum number of times. + num_iters_tensor = torch.tensor( + [num_iters], device=torch.cuda.current_device() + ) + dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN) + min_num_iters = num_iters_tensor.item() + total_iters = 0 + if test_case.throw_on_early_termination: + if min_num_iters == num_iters: + # Early termination rank(s) + exception_ctx = self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} exhausted all inputs" + ) + else: + # Non early termination rank + exception_ctx = self.assertRaisesRegex( + RuntimeError, + "Detected at least one rank that exhausted inputs.", + ) + else: + exception_ctx = nullcontext() + with exception_ctx: + with net.join( + throw_on_early_termination=test_case.throw_on_early_termination + ): + for i in range(num_iters): + # Use model.no_sync() to disable grad synchronization every + # sync_interval. + if i % sync_interval != 0: + context = net.no_sync() + else: + context = nullcontext() + with context: + if isinstance(inp, tuple): + loss = net(*inp).sum() + else: + loss = net(inp).sum() + loss.backward() + self._model_step(net) + # Ensure completion of GPU kernels (including allreduce). If the + # join API is not properly implemented, then this should hang + # since the allreduce will hang. 
+ torch.cuda.synchronize(device=rank) + total_iters += 1 + if test_case.throw_on_early_termination: + # Ensure we iterated min_num_iters times. + self.assertEqual(total_iters, min_num_iters) + else: + # Ensure we iterated at least min_num_iters times. + self.assertGreaterEqual(total_iters, min_num_iters) + + # Ensure completion of all GPU kernels. + torch.cuda.synchronize(device=rank) + # When throwing on early rank termination, we do not + # broadcast model state from an authoritative rank. All models + # should already be in sync. + if not test_case.throw_on_early_termination: + self.assertTrue(net._authoritative_rank) + # All ranks should have agreed on the same authoritative_rank! + final_rank_tensor = torch.tensor( + [net._authoritative_rank], device=self.rank + ) + tensor_list = [ + torch.zeros_like(final_rank_tensor) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, final_rank_tensor) + max_rank = dist.get_world_size() - 1 + self.assertSetEqual( + {max_rank}, {tensor.item() for tensor in tensor_list} + ) + # Ensure that all models are the same across ranks after all have joined. + self.validate_net_equivalence(net) + # Ensure that running with DDP uneven inputs was logged. + ddp_logging_data = net._get_ddp_logging_data() + self.assertTrue(ddp_logging_data.get("join_uneven_inputs")) + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_inputs_stop_iteration_sync_bn(self): + # Tests that uneven inputs join handler correctly throws StopIteration + # for models with SyncBN or general collective comm when + # throw_on_early_termination=True. + class ModelWithComm(torch.nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(2, 40, bias=False) + + def forward(self, x): + x = self.lin(x) + dist.all_reduce(x) + return x + + torch.cuda.set_device(self.rank) + model_bn = BN_NET + model_bn = nn.SyncBatchNorm.convert_sync_batchnorm( + copy.deepcopy(model_bn) + ).cuda(self.rank) + comm_model = ModelWithComm().cuda(self.rank) + model_input = torch.randn(10, 2).cuda(torch.cuda.current_device()) + + for model in [model_bn, comm_model]: + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + min_num_iters = 5 + if self.rank != 0: + # Early termination rank(s) + num_iters = min_num_iters + exception_ctx = self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} exhausted all inputs" + ) + else: + # Non early termination rank + num_iters = min_num_iters * 2 + exception_ctx = self.assertRaisesRegex( + RuntimeError, + "Detected at least one rank that exhausted inputs.", + ) + n = 0 + with exception_ctx: + with model.join(throw_on_early_termination=True): + for i in range(num_iters): + loss = model(model_input).sum() + loss.backward() + self._model_step(model) + n += 1 + + self.assertEqual(n, min_num_iters) + # Verify model equivalence + self.validate_net_equivalence(model) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_inputs(self): + dim = 1000 + batch = 1 + # Create a variety of models to run uneven input tests on. 
+ large_model = nn.Sequential( + nn.Conv2d(1, 20, 5), + nn.ReLU(), + nn.Conv2d(20, 32, 5), + nn.ReLU(), + nn.Conv2d(32, 256, 5), + nn.ReLU(), + ) + small_model = nn.Linear(dim, dim, bias=False) + bn_net = BatchNormNet() + + class UnusedParamModule(nn.Module): + def __init__(self, unused_params_rank): + super().__init__() + self.t0 = Task() + self.t1 = Task() + self.unused_params_rank = unused_params_rank + + def task_parameters(self): + return (self.t0.p, self.t1.p) + + def forward(self, x, rank): + return ( + self.t1(self.t0(x)) + if rank != self.unused_params_rank + else self.t1(x) + ) + + unjoined_rank_with_unused_params_model = UnusedParamModule(1) + joined_rank_with_unused_params_model = UnusedParamModule(0) + + rank = self.rank + models_to_test = [ + # Network with batchnorm + DDPUnevenTestInput( + name="batch_norm_net", + model=bn_net, + inp=torch.ones(batch, 2, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="large_conv_model", + model=large_model, + inp=torch.ones(batch, batch, dim, dim, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="small_model", + model=small_model, + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + # Unused parameter test where rank that does not join early has unused params + DDPUnevenTestInput( + name="unjoined_rank_with_unused_params_model", + model=unjoined_rank_with_unused_params_model, + inp=(torch.ones(batch, 2, device=rank), rank), + sync_interval=1, + ), + # Unused parameter test where rank that does join early has unused params + DDPUnevenTestInput( + name="joined_rank_with_unused_params_model", + model=joined_rank_with_unused_params_model, + inp=(torch.ones(batch, 2, device=rank), rank), + sync_interval=1, + ), + ] + + # Test models that have hook installed. + models_with_hook = [ + DDPUnevenTestInput( + name="small_model_allreduce_hook", + model=small_model, + hook=default.allreduce_hook, + state=None, + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="small_model_power_sgd_hook", + model=small_model, + hook=powerSGD.powerSGD_hook, + state=powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + # Config so that powerSGD runs immediately instead of + # allreduce. + start_powerSGD_iter=1, + warm_start=False, + use_error_feedback=False, + ), + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + ] + models_to_test.extend(models_with_hook) + + # Add resnet model if we have torchvision installed. + if HAS_TORCHVISION: + resnet_model = torchvision.models.resnet50() + models_to_test.append( + DDPUnevenTestInput( + name="resnet_model", + model=resnet_model, + inp=torch.ones(1, 3, 1000, 1000), + sync_interval=1, + ) + ) + + # Test with no_sync every 2, 3, 4, ... iterations. + models_with_sync = [] + for i, test_input in enumerate(models_to_test): + models_with_sync.append( + DDPUnevenTestInput( + name=test_input.name, + model=test_input.model, + inp=test_input.inp, + sync_interval=i + 2, + ) + ) + + throw_on_early_term_tests = [] + for test_input in models_to_test: + throw_on_early_term_tests.append( + DDPUnevenTestInput( + name=test_input.name, + model=test_input.model, + inp=test_input.inp, + sync_interval=test_input.sync_interval, + throw_on_early_termination=True, + ) + ) + + models_to_test.extend(models_with_sync) + models_to_test.extend(throw_on_early_term_tests) + + # 0 iteration tests for when one process does not train model at all, so + # we must shadow the broadcast calls made when rebuilding buckets. 
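+        # For example, with world_size == 2, one early-joining rank,
+        # baseline_iter == 0 and offset == 2, the mapping generated below is
+        # {0: 0, 1: 2}: rank 0 contributes no batches at all while rank 1
+        # trains for two iterations.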
+ baseline_num_iters = [0, 5] + iteration_offsets = [2, 3, 10] + num_uneven_ranks = [1] + if dist.get_world_size() > 2: + num_uneven_ranks.append(2) + iteration_mappings = [] + # Generate rank : num_iters mappings for various uneven input scenarios. + # This includes cases where rank 0 joins early and all other ranks join + # later, and scenarios where multiple ranks join early, but at different + # iterations, and later ranks join later. + for num_early_join_ranks in num_uneven_ranks: + for baseline_iter in baseline_num_iters: + for offset in iteration_offsets: + mapping = { + rank: baseline_iter + for rank in range(0, num_early_join_ranks) + } + # if num_early_join_ranks > 1, ranks > 0 that will join early + # iterate offset//2 more times than rank 0, to test nodes + # depleting inputs at different times. + if num_early_join_ranks > 1: + for rank in mapping.keys(): + if rank > 0: + mapping[rank] += offset // 2 + mapping.update( + { + rank: baseline_iter + offset + for rank in range( + num_early_join_ranks, dist.get_world_size() + ) + } + ) + iteration_mappings.append(mapping) + + for (test_case, iteration_mapping) in itertools.product( + models_to_test, iteration_mappings + ): + if self.rank == 0: + print( + f"""Running test: {test_case.name} sync interval + {test_case.sync_interval} with iteration mapping + {iteration_mapping}""" + ) + self._run_uneven_inputs_test( + test_case, + iteration_mapping, + find_unused_params=("unused_params_model" in test_case.name), + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_input_join_disable(self): + # tests that if net.join() with enable=False is specified, DDP works as + # expected with even inputs. + torch.manual_seed(self.rank) + net = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank] + ) + inp = torch.ones(1) * self.rank + n_iters = 5 + world_size = dist.get_world_size() + with net.join(enable=False): + for _ in range(n_iters): + # Clear grads + grad = net.module.weight.grad + if grad is not None: + grad.requires_grad_(False) + grad.zero_() + out = net(inp) + loss = out.sum() + loss.backward() + # Validate gradients to ensure that we divide by the correct + # world_size when join mode is disabled. + expected_grad = sum(i for i in range(world_size)) / world_size + self.assertEqual(net.module.weight.grad.item(), expected_grad) + + join_config = net._join_config + self.assertFalse(join_config.enable) + self.validate_net_equivalence(net) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_input_exception(self): + # Tests that exceptions during training are correctly propagated by the + # context manager. 
+ error_str = "Intentional error" + + class ExceptionModule(nn.Module): + def __init__(self): + super().__init__() + self.param = nn.Parameter(torch.ones(1, requires_grad=True)) + + def forward(self, _): + raise ValueError(error_str) + + exception_module = ExceptionModule() + net = torch.nn.parallel.DistributedDataParallel( + exception_module.cuda(self.rank), device_ids=[self.rank] + ) + inp = torch.ones(1) + with self.assertRaisesRegex(ValueError, error_str): + with net.join(): + out = net(inp) + loss = out.sum() + loss.backward() + + def _test_broadcast_object_list(self, group=None): + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + + # Only set device for NCCL backend since it must use GPUs. + # Case where rank != GPU device. + next_rank = (self.rank + 1) % int(self.world_size) + backend = os.environ["BACKEND"] + if backend == "nccl": + torch.cuda.set_device(next_rank) + + src_rank = 0 + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=0))) + + if IS_FBCODE: + # Create Tensor with > 2^31 Bytes storage requirements + # Only on FBCODE as testing OOMs in OSS + gather_objects.append(Foo(torch.randn(3, 178956971))) + objects = ( + gather_objects + if self.rank == src_rank + else [None for _ in gather_objects] + ) + + # Single object test with device specified. Backend="gloo", device=cpu + if backend != "nccl": + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device("cpu") + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test with device specified. Backend="gloo", device=current_device+1 + # The test is gated by the fact GPU count is the same as world size to avoid the case + # when backend is gloo but there is no multiple GPU devices. + if backend != "nccl" and torch.cuda.device_count() == int(self.world_size): + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device(next_rank) + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test with device specified. 
Backend="nccl", device=current_device+1 + if backend == "nccl" and torch.cuda.device_count() == int(self.world_size): + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device(next_rank) + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test: backward compatibility with device unspecified + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list(single_obj_list, src=0, group=group) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Multiple input objects test + if self.rank != src_rank: + self.assertNotEqual(objects, gather_objects) + dist.broadcast_object_list(objects, src=0, group=group) + self.assertEqual(objects, gather_objects) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL"]) + @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") + def test_broadcast_object_list(self): + return self._test_broadcast_object_list() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL"]) + def _test_broadcast_object_list_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_broadcast_object_list(subgroup) + + def _test_ddp_ignore_params_arg(self, static_graph=False): + class TestModel(nn.Module): + def __init__(self, rank): + self.rank = rank + super().__init__() + self.fc1 = nn.Linear(1, 1, bias=False) + # Proxy that will be materialized to another architecture later. + # (after wrapping model with DDP) + if self.rank == 0: + self.fc2 = nn.Linear(1, 10, bias=False) + else: + self.fc2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + device_id = self.rank + # Ensure the test works for both find_unused_parameter and broadcast_buffer settings. + for (find_unused, broadcast_buffers) in itertools.product( + [False, True], [False, True] + ): + model = TestModel(self.rank).float().to(device_id) + # Note that the model can have different shape buffers if we pass + # them in to be ignored as well. + model.fc2.register_buffer( + "ignore_buffer", torch.zeros(5 + self.rank, device=self.rank) + ) + proxy_params = list(model.fc2.parameters()) + proxy_buffers = list(model.fc2.buffers()) + model_fc2_name = next( + module_name + for module_name, module in model.named_modules() + if module is model.fc2 + ) + proxy_param_names = [ + f"{model_fc2_name}.{param_name}" + for param_name, _ in model.fc2.named_parameters() + ] + proxy_buffer_names = [ + f"{model_fc2_name}.{buf_name}" + for buf_name, _ in model.fc2.named_buffers() + ] + # Specify that we should ignore proxy_params since it will be + # materialized later. 
+ torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, proxy_param_names + proxy_buffer_names + ) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[device_id], + find_unused_parameters=find_unused, + broadcast_buffers=broadcast_buffers, + static_graph=static_graph, + ) + # Materialize new params. These are not registered in DDP and thus + # don't have autograd hooks installed on them. + ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id) + + # local model with the new materialized parameters. + local_model = copy.deepcopy(ddp.module).cuda(self.rank) + + inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1) + for i in range(6): + ddp(inp).sum().backward() + + local_model(inp).sum().backward() + # materialized param grad is not touched by DDP, so its grad should + # be the same as if running locally. + for materialized_param, local_param in zip( + ddp.module.fc2.parameters(), local_model.fc2.parameters() + ): + self.assertEqual(materialized_param.grad, local_param.grad) + + # fc1 parameter grad should still be different, due to allreduce. + for synced_param, local_param in zip( + ddp.module.fc1.parameters(), local_model.fc1.parameters() + ): + self.assertFalse(synced_param.grad == local_param.grad) + + # Proxy module grad should not be touched + for proxy_param in proxy_params: + self.assertTrue(proxy_param.grad is None) + + # Synchronize since we run multiple iterations of this test, to + # isolate failure hangs. + torch.cuda.synchronize(device=self.rank) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_ignore_params_arg(self): + self._test_ddp_ignore_params_arg(static_graph=False) + self._test_ddp_ignore_params_arg(static_graph=True) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_unused_params_rebuild_buckets_exception(self): + class ToyModel(nn.Module): + def __init__(self): + super().__init__() + self.net1 = nn.Linear(10, 10, bias=False) + self.net2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + return self.net1(x) + + ddp = torch.nn.parallel.DistributedDataParallel( + ToyModel().cuda(self.rank), device_ids=[self.rank] + ) + for i in range(2): + inp = torch.rand(1, 10) + if i > 0: + # On 2nd iteration, this will fail during rebuild_buckets, + # but we should report an error regarding unused parameters + # since that is the underlying root cause. + try: + ddp(inp).sum().backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(ddp, msg) + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. + if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["net2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse( + True, "DDP unused parameters error not raised." 
+ ) + else: + ddp(inp).sum().backward() + + dist.barrier() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_shared_grad_acc_unused_params(self): + # When find_unused_parameters=True, ensure we mark unused parameters + # even if they share gradient accumulators. + class ToyModel(nn.Module): + def __init__(self): + super().__init__() + # net1, bias, and net1.bias are all unused params. + self.net1 = nn.Linear(10, 5, bias=False) + self.bias = nn.Parameter(torch.zeros(5)) + # net1.bias and self.bias are names for the same underlying + # parameter, so they share the same grad acc. This caused + # the bug reported in https://github.com/pytorch/pytorch/issues/41324. + self.net1.bias = self.bias + self.net2 = nn.Linear(10, 5) + + def forward(self, x): + return self.net2(x).sum() + + torch.cuda.set_device(self.rank) + model = ToyModel().to(torch.cuda.current_device()) + for static in [True, False]: + ddp_model = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + find_unused_parameters=True, + static_graph=static, + ) + inp = torch.randn(20, 10, device=self.rank) + for i in range(6): + loss = ddp_model(inp) + # To test https://github.com/pytorch/pytorch/issues/61982 + loss /= 10 + loss.backward() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_device(self): + m = nn.Linear(10, 10).to(self.rank) + expected_len = 2 + + class TensorWrapper: + __slots__ = ["t", "moved_to_gpu"] + + def __init__(self, t): + self.t = t + self.moved_to_gpu = False + + # Handlers for specific types of validation we want to do based on + # the input type. + + def tuple_and_list_validator(x): + self.assertTrue(len(x), expected_len) + self.assertEqual(1, len({t.device for t in x})) + self.assertEqual(x[0].device.index, self.rank) + return x[0] + x[1] + + def namedtuple_validator(x): + self.assertEqual(x._fields, EXPECTED_FIELDS) + self.assertEqual(x.a.device.index, x.b.device.index) + self.assertEqual(x.a.device.index, self.rank) + return x.a + x.b + + def custom_type_validator(x): + self.assertTrue(x.moved_to_gpu or (str(x.t.device) == "cpu")) + x.t = x.t.to(self.rank) + x.moved_to_gpu = True + return x.t + + def dict_validator(x): + self.assertTrue(EXPECTED_FIELDS[0] in x.keys()) + self.assertTrue(EXPECTED_FIELDS[1] in x.keys()) + self.assertEqual(1, len({t.device for t in x.values()})) + self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank) + return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]] + + validators = { + TensorWrapper: custom_type_validator, + tuple: tuple_and_list_validator, + list: tuple_and_list_validator, + TestNamedTupleInput_0: namedtuple_validator, + TestNamedTupleInput_1: namedtuple_validator, + dict: dict_validator, + } + + class ToyModel(torch.nn.Module): + def __init__(_self): # noqa: B902 + super().__init__() + _self.lin = nn.Linear(10, 10, bias=False) + + def forward(_self, x, expected_type): # noqa: B902 + # Similar to scatter, the recursive to in the single-device + # case does not move tensors if they are in a custom type. 
+ self.assertTrue(isinstance(x, expected_type)) + fwd_tensor = validators[expected_type](x) + return _self.lin(fwd_tensor) + + model = torch.nn.parallel.DistributedDataParallel( + ToyModel().to(self.rank), device_ids=[self.rank] + ) + + def train_iter(inp, input_type): + for _ in range(4): + out = model(inp, input_type) + out.sum().backward() + + # CPU tuple input, should be moved to the proper device before call + # to forward. + inp = tuple(torch.randn(10, 10) for _ in range(expected_len)) + train_iter(inp, tuple) + + # List CPU input, should be moved to proper device before call to + # forward. + inp = [torch.randn(10, 10) for _ in range(expected_len)] + train_iter(inp, list) + # Custom type containing tensor. The type is maintained, but the + # device is not propagated (which is what happens with scatter too) + inp = TensorWrapper(torch.randn(10, 10)) + train_iter(inp, TensorWrapper) + # NamedTuple input. The type should be maintained and tensor inputs + # should be moved to the correct device as in scatter. + batch = 5 + dim = 10 + a = torch.rand(batch, dim) + b = torch.rand(batch, dim) + + inp = TestNamedTupleInput_0(a, b) + train_iter(inp, type(inp)) + + inp = TestNamedTupleInput_1(a, b) + train_iter(inp, type(inp)) + + # dictionary input. + inp = { + EXPECTED_FIELDS[0]: a, + EXPECTED_FIELDS[1]: b, + } + train_iter(inp, type(inp)) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_namedtuple(self): + batch = 5 + dim = 10 + + a = torch.rand(batch, dim, device=self.rank) + b = torch.rand(batch, dim, device=self.rank) + + class NamedTupleModule(torch.nn.Module): + def __init__(_self): # noqa: B902 + super().__init__() + _self.lin = nn.Linear(10, 1) + + def forward(_self, input, expected_type): # noqa: B902 + # Without NamedTuple support, this would be of type tuple. + self.assertTrue( + isinstance(input, expected_type), + f"Expected type {expected_type} but got {type(input)}", + ) + self.assertEqual(input._fields, EXPECTED_FIELDS) + self.assertEqual(a, input.a) + self.assertEqual(b, input.b) + return _self.lin(torch.mul(input.a, input.b)) + + model = torch.nn.parallel.DistributedDataParallel( + NamedTupleModule().cuda(self.rank), device_ids=[self.rank] + ) + inp = TestNamedTupleInput_0(a, b) + # The following would fail if DDP does not propagate NamedTuples correctly. + model(inp, type(inp)) + + inp = TestNamedTupleInput_1(a, b) + model(inp, type(inp)) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_control_flow_same_across_ranks(self): + # Control flow that is the same across ranks. + batch = 20 + dim = 10 + + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + random_input = torch.randn(batch, dim, device=self.rank) + ones_input = torch.ones(batch, dim, device=self.rank) + for i in range(6): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + # On even iterations, 2nd param goes unused, on odd iterations, + # it is used. 
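# Illustrative sketch (not from the patch itself): models whose forward() skips
# submodules depending on the input need find_unused_parameters=True so DDP can
# mark the skipped parameters as ready; with it off, the second iteration hits
# the "expected to have finished reduction" error exercised just below. Assumes
# an initialized process group and one GPU per rank.
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP


class SometimesSkips(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(10, 10)
        self.lin2 = nn.Linear(10, 10)

    def forward(self, x, use_second: bool):
        out = self.lin1(x)
        return self.lin2(out) if use_second else out


rank = dist.get_rank()
ddp = DDP(SometimesSkips().cuda(rank), device_ids=[rank],
          find_unused_parameters=True)
for step in range(4):
    x = torch.randn(8, 10, device=rank)
    ddp(x, use_second=(step % 2 == 0)).sum().backward()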
+ local_used_map = model.reducer._get_local_used_map() + if i % 2 == 0: + expected = torch.tensor( + [world_size, 0], device=self.rank, dtype=torch.int32 + ) + else: + expected = torch.tensor( + [world_size, world_size], device=self.rank, dtype=torch.int32 + ) + + # Validate parameter usage. + variable_usage_tensor = local_used_map + self.assertEqual(variable_usage_tensor, expected) + + # Validate appropriate error message when DDP is used with + # find_unused_parameters=False. + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + for i in range(2): + if i == 0: + loss = model(random_input).sum() + loss.backward() + else: + try: + loss = model(random_input).sum() + loss.backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(model, msg) + # 2nd linear layer is unused + unused_param_index = 1 + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. + if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["lin2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse(True, "DDP error not raised") + + dist.barrier() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_invalid_static_graph(self): + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + static_graph=True, + ) + random_input = torch.randn(20, 10, device=self.rank) + ones_input = torch.ones(20, 10, device=self.rank) + # unused parameter in the first iteration got used + # in second iteration. + expected_err = "Your training graph has changed in this iteration" + with self.assertRaisesRegex(RuntimeError, expected_err): + for i in range(2): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + + verify_ddp_error_logged(model, expected_err) + + # used parameter in the first iteration got unused + # in second iteration. + with self.assertRaisesRegex( + RuntimeError, + "Expected to have finished reduction in the prior iteration " + "before starting a new one. This error indicates that your " + "training graph has changed in this iteration, " + "e.g., one parameter is used in first iteration, " + "but then got unused in the second iteration. 
" + "this is not compatible with static_graph set to True.\n" + "Parameter indices which did not receive grad for", + ): + for i in range(2): + if i % 2 != 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + + verify_ddp_error_logged(model, "Expected to have finished reduction") + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_control_flow_different_across_ranks(self): + # Control flow that is different across ranks. + batch = 20 + dim = 10 + + class ToyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + self.rank = rank + + def forward(self, x): + # Control-flow that is rank and input dependent for the + # model. + use_second_layer = ( + torch.equal(x, torch.ones(batch, dim, device=x.device)) + and self.rank == 1 + ) + + if use_second_layer: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ToyModel(self.rank).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + random_input = torch.randn(batch, dim, device=self.rank) + ones_input = torch.ones(batch, dim, device=self.rank) + for i in range(6): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + # On even iterations, 2nd param goes unused, on odd iterations, + # it is used only on rank 1. + local_used_map = model.reducer._get_local_used_map() + + if i % 2 == 0: + expected = torch.tensor( + [world_size, 0], device=self.rank, dtype=torch.int32 + ) + else: + expected = torch.tensor( + [world_size, 1], device=self.rank, dtype=torch.int32 + ) + + variable_usage_tensor = local_used_map + # Validate parameter usage. On odd iterations, 2nd param is only + # used on rank 1. + self.assertEqual(variable_usage_tensor, expected) + + # Validate appropriate error message when DDP is used with + # find_unused_parameters=False. + model = torch.nn.parallel.DistributedDataParallel( + ToyModel(self.rank).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + for i in range(2): + if i == 0: + loss = model(random_input).sum() + loss.backward() + else: + try: + loss = model(random_input).sum() + loss.backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(model, msg) + unused_param_index = 1 + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. 
+ if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["lin2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse(True, "DDP error not raised") + + dist.barrier() + + @require_backend_is_available({"gloo"}) + def test_scatter_object_list(self): + src_rank = 0 + scatter_list = ( + COLLECTIVES_OBJECT_TEST_LIST + if self.rank == src_rank + else [None for _ in COLLECTIVES_OBJECT_TEST_LIST] + ) + world_size = dist.get_world_size() + scatter_list = scatter_list[:world_size] + i = 0 + while len(scatter_list) < world_size: + scatter_list.append(scatter_list[i]) + i += 1 + + output_obj_list = [None] + dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank) + self.assertEqual( + output_obj_list[0], + COLLECTIVES_OBJECT_TEST_LIST[ + self.rank % len(COLLECTIVES_OBJECT_TEST_LIST) + ], + ) + # Ensure errors are raised upon incorrect arguments. + with self.assertRaisesRegex( + ValueError, + "Expected argument scatter_object_output_list to be a list of size at least 1.", + ): + dist.scatter_object_list([], scatter_list, src=src_rank) + + def _generate_sparse_tensors_for_bucket_assignment_test(self): + tensors = [ + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + ] + + tensors_sparse = [t.to_sparse() for t in tensors] + return tensors_sparse + + def _test_compute_bucket_assignment_by_size(self, use_logger): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=5) + ) + torch.cuda.set_device(self.rank) + + # Create a valid model. The constructor initializes the logger that we use later. + # We never actually use the rest of the model - we only need its logger. + net = EmbeddingNetDifferentParams(0) + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), + device_ids=[self.rank], + process_group=group_to_use, + ) + + # if we don't pass a logger then we can only check that an exception was thrown. + expected_err = "No support for sparse tensors." + with self.assertRaisesRegex(RuntimeError, expected_err): + tensors_sparse = ( + self._generate_sparse_tensors_for_bucket_assignment_test() + ) + if use_logger: + result = dist._compute_bucket_assignment_by_size( + tensors_sparse, [400], logger=net.logger + ) + else: + result = dist._compute_bucket_assignment_by_size( + tensors_sparse, [400] + ) + if use_logger: + verify_ddp_error_logged(net, expected_err) + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. 
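# Illustrative sketch (not from the patch itself): scatter_object_list, tested
# earlier in this hunk, delivers one picklable object from src to each rank;
# the input list is only consulted on the src rank and must hold world_size
# entries. Assumes an initialized (Gloo) process group; the helper name is
# made up for illustration.
import torch.distributed as dist


def scatter_shards(shards=None, src=0):
    world = dist.get_world_size()
    if dist.get_rank() != src:
        shards = [None] * world  # placeholder; ignored on non-src ranks
    out = [None]
    dist.scatter_object_list(out, shards, src=src)
    return out[0]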
+ dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_compute_bucket_assignment_by_size_sparse_error_without_logger(self): + self._test_compute_bucket_assignment_by_size(use_logger=False) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_compute_bucket_assignment_by_size_sparse_error_with_logger(self): + self._test_compute_bucket_assignment_by_size(use_logger=True) + + def _determine_expected_error_verify_model_across_rank( + self, group_to_use, diff_num_params=False + ): + # When running with NCCL backend, we don't expect an error on rank 0, + # rather, it will be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING. When + # running with Gloo or with debug mode wrapper, we expect the error + # to be caught inline. + # All ranks report same error when there is a # of parameter + # mismatch since we use allgather in the impl. + if diff_num_params: + expected_err = "DDP expects same model across all ranks" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + return ctx, expected_err + + is_detail_dbg_mode = dist.get_debug_level() == dist.DebugLevel.DETAIL + if self.rank == 0: + if ( + dist.get_backend(group_to_use) == dist.Backend.NCCL + and not is_detail_dbg_mode + ): + expected_err = "caught collective operation timeout" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + else: + expected_err = None + ctx = self.assertRaises(RuntimeError) + else: + expected_err = "appears not to match" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + return ctx, expected_err + + def _test_verify_model_across_rank(self, use_logger): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=5) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use + ) + + # Create a valid model. The constructor initializes the logger that we use later. + net = EmbeddingNetDifferentParams(0) + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), + device_ids=[self.rank], + process_group=group_to_use, + ) + + # Modify the model so that the number of parameters are different for each rank. + # This will cause a RuntimeError to be thrown below in _verify_param_shape_across_processes, + # so we can check if the correct error is thrown and is logged. + # We can't do this in the constructor above otherwise the logger will + # not be properly initialized. + net.module.lin = nn.Linear(100 if self.rank == 0 else 10, 1) + + # if we pass a logger we can verify that it was logged + with ctx: + if use_logger: + _verify_param_shape_across_processes( + net.process_group, list(net.parameters()), net.logger + ) + else: + _verify_param_shape_across_processes( + net.process_group, list(net.parameters()) + ) + # Should only be run by rank 0, and blocking_wait catches and + # reports exception. + dist.barrier(group_to_use) + + # We don't check when self.rank != 0 because the logger doesn't log + # the error "Caught collective operation" as that is not thrown in the reducer. 
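# Illustrative sketch (not from the patch itself): the surrounding tests lean on
# DDP's construction-time verification that every rank presents parameters with
# matching shapes, after which rank 0's weights are broadcast to the others.
# Building the same architecture on every rank keeps that check happy. Assumes
# an initialized process group and one GPU per rank.
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

rank = dist.get_rank()
torch.manual_seed(0)  # same init everywhere; DDP broadcasts rank 0's anyway
model = torch.nn.Linear(10, 1).cuda(rank)
ddp = DDP(model, device_ids=[rank])  # would raise if shapes diverged across ranks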
+ if use_logger and self.rank != 0: + verify_ddp_error_logged(net, expected_err) + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. + dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_verify_model_across_rank_with_logger(self): + self._test_verify_model_across_rank(use_logger=True) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_verify_model_across_rank_without_logger(self): + self._test_verify_model_across_rank(use_logger=False) + + def _run_test_ddp_model_with_diff_params(self, ctx, net, ddp_group, group_gloo): + with ctx: + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), device_ids=[self.rank], process_group=ddp_group + ) + # Should only be run by rank 0, and blocking_wait catches and + # reports exception. + dist.barrier(ddp_group) + + # can't use verify_ddp_error_logged here because net was never properly constructed + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. + dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_ddp_model_diff_shape_across_ranks(self): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=10) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use + ) + # Creates network with different sized embedding table on different + # ranks. This should throw an error during DDP init. + net = EmbeddingNetDifferentParams(self.rank) + self._run_test_ddp_model_with_diff_params( + ctx, net, group_to_use, group_gloo + ) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_ddp_model_diff_num_params_across_ranks(self): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=10) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use, diff_num_params=True + ) + + # Creates network with diff # of param across ranks, reducer should + # recognize this and throw appropriate error. 
+ net = EmbeddingNetDifferentParams( + self.rank, diff_num_params=(self.rank == 1) + ) + + self._run_test_ddp_model_with_diff_params( + ctx, + net, + group_to_use, + group_gloo, + ) + + def _test_output_unused_in_loss(self, module_cls, gradient_as_bucket_view): + model = module_cls() + local_net = copy.deepcopy(model) + net = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + + # Tests that certain parameters not getting gradient since the + # output is unused in loss computation is supported. Specifically, + # checks that the grads remain unchanged and are the same as local + # training. + inp = torch.randn(10, 10) + + # Ensure that if a param is not used in loss computation, its + # gradient is untouched, i.e. if it is None before it is None after, + # not zero. + if module_cls == DictOutputModule: + a, b = local_net(inp)["predictions"] + a_dist, b_dist = net(inp)["predictions"] + else: + a, b = local_net(inp) + a_dist, b_dist = net(inp) + + loss_dist = b_dist.sum() + loss_dist.backward() + + # Ensure that gradient corresponding to parameter "a" was not + # touched, i.e. it is None and matches the local grad. + if module_cls == DictOutputModule: + self.assertTrue(net.module.module.a.weight.grad is None) + self.assertEqual( + net.module.module.a.weight.grad, local_net.module.a.weight.grad + ) + else: + self.assertTrue(net.module.a.weight.grad is None) + self.assertEqual(net.module.a.weight.grad, local_net.a.weight.grad) + + saved_a_local_grad = None + saved_a_dist_grad = None + net.zero_grad() + local_net.zero_grad() + for i in range(6): + if module_cls == DictOutputModule: + a, b = local_net(inp)["predictions"] + a_dist, b_dist = net(inp)["predictions"] + else: + a, b = local_net(inp) + a_dist, b_dist = net(inp) + if i < 2: + # Use both params in loss computation. Later, "a" will go + # unused and we check to ensure DDP supports this and + # gradients remain the same as local training. + t = a @ b + t_dist = a_dist @ b_dist + loss = t.sum() + loss_dist = t_dist.sum() + else: + # Model output "a" unused in loss. + loss = b.sum() + loss_dist = b_dist.sum() + loss.backward() + loss_dist.backward() + if i == 1: + # Save grads to compare with them in next iterations. 
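# Illustrative sketch (not from the patch itself): when a model output never
# feeds the loss, the parameters behind it receive no gradient, and with
# find_unused_parameters=True DDP leaves their .grad as None rather than
# reducing zeros, which is the property asserted above. Assumes an initialized
# process group and one GPU per rank.
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP


class TwoHead(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Linear(10, 10, bias=False)  # this head will be ignored
        self.b = nn.Linear(10, 10, bias=False)

    def forward(self, x):
        return self.a(x), self.b(x)


rank = dist.get_rank()
net = DDP(TwoHead().cuda(rank), device_ids=[rank], find_unused_parameters=True)
_, used = net(torch.randn(4, 10, device=rank))
used.sum().backward()
assert net.module.a.weight.grad is None  # untouched, mirroring the test above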
+ if module_cls == DictOutputModule: + saved_a_local_grad = local_net.module.a.weight.grad + saved_a_dist_grad = net.module.module.a.weight.grad + else: + saved_a_local_grad = local_net.a.weight.grad + saved_a_dist_grad = net.module.a.weight.grad + self.assertEqual(saved_a_local_grad, saved_a_dist_grad) + elif i >= 2: + # parameter "a" of both models should be the same and not change + if module_cls == DictOutputModule: + self.assertEqual( + net.module.module.a.weight.grad, saved_a_dist_grad + ) + self.assertEqual( + local_net.module.a.weight.grad, saved_a_local_grad + ) + else: + self.assertEqual(net.module.a.weight.grad, saved_a_dist_grad) + self.assertEqual(local_net.a.weight.grad, saved_a_local_grad) + + # Verify grads are the same + for (local_param, dist_param) in zip( + local_net.parameters(), net.parameters() + ): + local_grad = local_param.grad + dist_grad = dist_param.grad + self.assertEqual(local_grad, dist_grad) + + dist.barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_output_unused_in_loss_tuple_module(self): + module_cls = UnusedParamTwoLinLayerNet + for grad_as_bucket_view in [True, False]: + self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_output_unused_in_loss_dict_module(self): + module_cls = DictOutputModule + for grad_as_bucket_view in [True, False]: + self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_undefined_grad_parity_unused_parameters(self): + # TODO: enable this for general training use cases: + # https://github.com/pytorch/pytorch/issues/58511. + x = torch.ones(1, 2).to(self.rank) + net = Net().to(self.rank) + local_net = copy.deepcopy(net) + net = torch.nn.parallel.DistributedDataParallel( + net, + device_ids=[self.rank], + find_unused_parameters=True, + ) + out = net(x).sum() + local_out = local_net(x).sum() + # Simulates undefined gradients. 
+ torch._C._functions.UndefinedGrad()(out).backward() + torch._C._functions.UndefinedGrad()(local_out).backward() + for (dist_param_name, dist_param), (local_param_name, local_param) in zip( + net.named_parameters(), local_net.named_parameters() + ): + dist_grad = dist_param.grad + local_grad = local_param.grad + self.assertEqual( + dist_grad, + local_grad, + f"""DDP param {dist_param_name} with grad {dist_grad} + does not match local param {local_param_name} with grad + {local_grad}""", + ) + + def _test_different_graph_across_ranks( + self, find_unused_parameters=False, static_graph=False + ): + class ToyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + self.rank = rank + + def forward(self, x): + if self.rank == 0: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + torch.manual_seed(31415) + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = ToyModel(self.rank).cuda(self.rank) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=find_unused_parameters, + gradient_as_bucket_view=True, + static_graph=static_graph, + ) + random_input = torch.randn(20, 10, device=self.rank) + for i in range(10): + out = ddp_model(random_input) + loss = out.sum() + loss.backward() + return ddp_model + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_different_graph_across_ranks(self): + base_model = self._test_different_graph_across_ranks( + find_unused_parameters=True + ) + self.assertFalse( + base_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) + ) + static_model = self._test_different_graph_across_ranks(static_graph=True) + self.assertTrue( + static_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) + ) + for i, j in zip(base_model.parameters(), static_model.parameters()): + self.assertEqual(i, j) + + @require_backend_is_available({"gloo"}) + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "MacOS uses uv transport which does not have as robust error handling as tcp transport", + ) + def test_monitored_barrier_gloo(self): + tensors = [torch.ones(10) * self.rank] + # Kick off some allreduce work on all ranks + for _ in range(10): + dist.all_reduce(torch.cat(tensors)) + # Run monitored barrier and ensure it passes + timeout = timedelta(seconds=2) + dist.monitored_barrier(timeout=timeout) + # Check monitored_barrier success with wait_all_ranks=True + for _ in range(10): + dist.all_reduce(torch.cat(tensors)) + dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) + # All ranks besides 1 call into barrier, rank 0 should report failure + # while others report gloo error. + failed_rank = 1 + src_rank = 0 + if self.rank == src_rank: + with self.assertRaisesRegex( + RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" + ): + dist.monitored_barrier(timeout=timeout) + elif self.rank != failed_rank: + # Other ranks should not pass barrier since rank 0 failed. + err_regex = ( + f"Rank {self.rank} successfully reached monitoredBarrier," + f" but received errors while waiting for send/recv from rank" + f" {src_rank}" + ) + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout) + + # We need a barrier since otherwise failed_rank exits too early + # and cause a timeout. 
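# Illustrative sketch (not from the patch itself): monitored_barrier is a
# Gloo-only barrier in which rank 0 reports exactly which ranks failed to check
# in before the timeout, making desyncs much easier to localize than with a
# plain dist.barrier(). Assumes an initialized Gloo process group.
from datetime import timedelta

import torch.distributed as dist

# Rank 0 raises and names the missing rank(s) if anyone fails to arrive in time.
dist.monitored_barrier(timeout=timedelta(seconds=30), wait_all_ranks=True)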
+ self._barrier(timeout=30) + + @require_backend_is_available({"gloo"}) + def test_monitored_barrier_gloo_subgroup(self): + # Tests that monitored_barrier works as expected on non-default + # process groups. + failed_rank = 1 + timeout = 0.1 + subgroup = dist.new_group(ranks=[0, 1]) + + if self.rank == failed_rank: + return + + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" + ): + dist.monitored_barrier(subgroup, timeout) + else: + # Other ranks call into monitored_barrier, but this should be a + # noop because they are not part of the subgroup. Verify that + # there are no errors here. + dist.monitored_barrier(subgroup, timeout) + + def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks): + # tests expected behavior when nonzero rank hangs. + nccl_pg = dist.new_group( + ranks=list(range(int(self.world_size))), + # provide sufficient timeout so communicators + # can be initialized in ctor. + timeout=timedelta(seconds=15), + backend=dist.Backend.NCCL, + ) + gloo_pg = dist.new_group( + ranks=list(range(int(self.world_size))), + backend=dist.Backend.GLOO, + ) + tensors = [torch.ones(10, device=self.rank) * self.rank] + # Let all ranks call allreduce first to set up communicators etc. + # Directly simulating error here will run into store issue described + # in https://github.com/pytorch/pytorch/issues/54524. + nccl_pg.allreduce(tensors).wait(timedelta(seconds=5)) + # All ranks besides 0 call into allreduce. This is to simulate a + # desync across the world, where some ranks call into + # monitored_barrier() and others are stuck in collective comm. In + # practice, we don't need TORCH_NCCL_BLOCKING_WAIT, but we use it in this + # test to ensure it exits cleanly. + if self.rank != 0: + # Can get different errors here depending on whether gloo-based + # wrapper PG is enabled or not, since with wrapper pg, it will + # fail in a collective synchronization check and not actually + # call into the nccl pg. + if dist.get_debug_level() == dist.DebugLevel.DETAIL: + err_regex = "Timed out waiting" + else: + err_regex = "caught collective operation timeout" + with self.assertRaisesRegex(RuntimeError, err_regex): + nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1)) + else: + # Rank 0 should report first (in order) timed out rank or all ranks + # depending on wait_all_ranks flag passed into monitored_barrier. + if wait_all_ranks: + rank_str = ", ".join( + [str(i) for i in range(1, int(self.world_size))] + ) + err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" + else: + expected_first_fail_rank = 1 + err_regex = f"Rank {expected_first_fail_rank} failed to pass monitoredBarrier" + monitored_barrier_timeout_seconds = timedelta(seconds=0.1) + with self.assertRaisesRegex(RuntimeError, err_regex): + gloo_pg.monitored_barrier( + monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks + ) + + self._barrier(timeout=30) + + @with_nccl_blocking_wait + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_monitored_barrier_allreduce_hang(self): + # tests expected behavior when nonzero rank hangs and we want to + # report first timed out rank. 
+ self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False) + + @with_nccl_blocking_wait + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_monitored_barrier_allreduce_hang_wait_all_ranks(self): + # tests expected behavior when nonzero rank hangs and we want to + # report all timed out ranks. + self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True) + + @require_backend_is_available({"gloo"}) + def test_monitored_barrier_gloo_rank_0_timeout(self): + # tests error when rank 0 exhausts its given timeout. + process_group = dist.new_group(ranks=list(range(int(self.world_size)))) + timeout = timedelta(seconds=0) + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} timed out in monitoredBarrier" + ): + process_group.monitored_barrier(timeout) + + @require_backend_is_available({"gloo"}) + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "MacOS uses uv transport which does not have as robust error handling as tcp transport", + ) + def test_monitored_barrier_failure_order(self): + # Ensure that the first (in sorted order) rank is reported when + # multiple ranks fail to pass the monitored_barrier. + # TODO(#54879): Provide ability to wait and report all failed ranks + expected_first_failed_rank = 2 + timeout = timedelta(seconds=2) + src_rank = 0 + if self.rank == src_rank: + with self.assertRaisesRegex( + RuntimeError, f"Rank {expected_first_failed_rank}" + ): + dist.monitored_barrier(timeout=timeout) + elif self.rank == 1: + err_regex = ( + f"Rank {self.rank} successfully reached monitoredBarrier," + f" but received errors while waiting for send/recv from rank" + f" {src_rank}" + ) + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout) + + @require_backend_is_available({"gloo"}) + @skip_if_small_worldsize + def test_monitored_barrier_wait_all_ranks(self): + # Tests simple case where > 1 rank does not call into monitored + # barrier and verifies all ranks are reported by rank 0. + if self.rank == 0: + timeout = timedelta(seconds=0.1) + rank_str = ", ".join([str(i) for i in range(1, int(self.world_size))]) + err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["INFO"]) + @skip_if_lt_x_gpu(2) + def test_ddp_build_debug_param_to_name_mapping(self): + model = TwoLinLayerNet() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + expected_mapping = {0: "a.weight", 1: "b.weight"} + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertDictEqual(expected_mapping, param_to_name_mapping) + + # Test when DDP is used with ignored parameters. 
+ model = TwoLinLayerNet() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + expected_mapping = {0: "b.weight"} + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertDictEqual(expected_mapping, param_to_name_mapping) + + # Test errors are raised when DDP and module parameters mismatch. + # This generally indicates a bug with DDP and is not expected to + # happen in user applications. + model = TwoLinLayerNet() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + net_params, _ = net._build_params_for_reducer() + if self.rank == 0: + print(type(net_params[0])) + + net_params.extend( + [ + torch.nn.Parameter(torch.ones(1)), + torch.nn.Parameter(torch.ones(1)), + ] + ) + + with self.assertRaisesRegex(ValueError, "Expected param to name mapping"): + net._build_debug_param_to_name_mapping(net_params) + + net_params = net_params[:-3] + with self.assertRaisesRegex(ValueError, "Param with name"): + net._build_debug_param_to_name_mapping(net_params) + + net_params.extend( + [ + torch.nn.Parameter(torch.ones(1)), + torch.nn.Parameter(torch.ones(1)), + ] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @with_dist_debug_levels(levels=["INFO"]) + @skip_if_lt_x_gpu(2) + def test_ddp_build_debug_param_to_name_mapping_requires_grad(self): + class Net(nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(10, 10) + # Is not tracked by DDP and should not show up in param to + # name mapping. + self.lin.bias.requires_grad_(False) + + def forward(self, x): + return self.lin(x) + + model = Net() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), device_ids=[self.rank] + ) + expected_mapping = { + 0: "lin.weight", + } + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertEqual(param_to_name_mapping, expected_mapping) + + def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse): + debug_mode_off = dist.get_debug_level() == dist.DebugLevel.OFF + + class SubModule(nn.Module): + def __init__(self): + super().__init__() + self.embedding_net = EmbeddingNetDifferentParams(0) + self.lin = TwoLinLayerNet() + self.bn = BatchNormNet() + self.lin_layer = nn.Linear(4, 10, bias=False) + + def forward(self, x): + x = self.bn(x) + x = self.lin_layer(x) + x = self.lin.a(x) # self.lin.b param unused + # EmbeddingNetDifferentParams entirely unused: self.embedding_net.embedding and + # self.embedding_net.lin unused. 
+ return x + + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.sub_module = SubModule() + + def forward(self, x): + return self.sub_module(x) + + model = MyModel() + sparse_embedding_fqns = [] + if ignore_sparse: + for module_name, module in model.named_modules(): + if module == model.sub_module.embedding_net.embedding: + for parameter_name, param in module.named_parameters( + recurse=False + ): + fqn = f"{module_name}.{parameter_name}" + sparse_embedding_fqns.append(fqn) + + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, sparse_embedding_fqns + ) + unused_modules = [ + model.sub_module.embedding_net.lin, + model.sub_module.lin.b, + ] + else: + unused_modules = list(model.sub_module.embedding_net.modules()) + [ + model.sub_module.lin.b, + ] + + expected_unused_param_fqns = [] + used_param_fqns = [] # Validate that these don't mistakenly show up. + fqn_to_param_index = {} + index = 0 + for module_name, module in model.named_modules(): + for parameter_name, param in module.named_parameters(recurse=False): + fqn = f"{module_name}.{parameter_name}" + fqn_to_param_index[fqn] = index + if fqn not in sparse_embedding_fqns: + index += 1 + if module in unused_modules: + expected_unused_param_fqns.append(fqn) + else: + if ( + not ignore_sparse + or module != model.sub_module.embedding_net.embedding + ): + used_param_fqns.append(fqn) + + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + batch, dim = 10, 2 + inp = torch.ones(batch, dim) + for i in range(2): + if i == 0: + out = net(inp) + loss = out.sum() + loss.backward() + else: + try: + out = net(inp) + loss = out.sum() + loss.backward() + except RuntimeError as e: + e = str(e) + + unused_param_substr = e[e.find("did not receive grad") :] + # Validate that each unused param fully qualified name + # shows up in error logs. We do this instead of + # constructing a joined string since order of parameters + # can be different in Reducer. In addition, validate + # param indices show up as well. + for unused_param_fqn in expected_unused_param_fqns: + self.assertTrue( + unused_param_fqn in unused_param_substr + or debug_mode_off + ) + self.assertTrue( + str(fqn_to_param_index[unused_param_fqn]) + in unused_param_substr, + f"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}", + ) + + # Validate that used param fqns don't show up in error + # logs. + for used_param_fqn in used_param_fqns: + self.assertFalse(used_param_fqn in unused_param_substr) + # Validate that ignored param fqns don't show up as unused + # (since DDP does not track them) + for sparse_param_fqn in sparse_embedding_fqns: + self.assertFalse(sparse_param_fqn in unused_param_substr) + else: + self.assertTrue(False, "Expected error was not raised!") + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_multiple_nested_unused_params_error(self): + self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_multiple_nested_unused_params_err_ignore_params(self): + # Tests unused parameter reporting when DDP is configured to ignore + # certain parameters. 
+ self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_ddp_inference(self): + # tests that DDP module can be run on a single node with no_grad + # or eval setting and there is no hang. + rank = self.rank + torch.cuda.set_device(rank) + model = Net().cuda() + local_model = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + ) + syncbn_model = nn.SyncBatchNorm( + 2, momentum=0.99, track_running_stats=False + ).cuda() + local_syncbn_model = copy.deepcopy(syncbn_model) + syncbn_model = torch.nn.parallel.DistributedDataParallel( + syncbn_model, device_ids=[rank] + ) + inp = torch.randn(10, 2, device=rank) + inp_syncbn = torch.randn(10, 2, 4, 4, device=rank) + tests = [ + (model, local_model, inp), + (syncbn_model, local_syncbn_model, inp_syncbn), + ] + for test in tests: + test_model, test_local_model, test_inp = test + if self.rank == 0: + test_model.eval() + test_local_model.eval() + for _ in range(6): + self.assertEqual( + test_model(test_inp), test_local_model(test_inp) + ) + + # Barrier since only rank 0 runs inference. Test should be + # much faster than 30s, but this is to avoid flakiness. + self._barrier(timeout=30) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") + def test_ddp_sync_bn_training_vs_eval(self): + rank = self.rank + torch.cuda.set_device(rank) + # Need to set track_running_stats=False, when track_running_stats=True, + # bn_training is False and sync could not occur in eval model. + model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda( + rank + ) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank]) + # Test sync occurs in training mode. + with torch.autograd.profiler.profile() as prof: + for i in range(6): + inp = torch.randn(10, 2, 4, 4).cuda(rank) + out = model(inp) + loss = out.sum() + loss.backward() + + # SyncBN allgathers stats across all ranks, so verify call to + # all_gather in profiler. + if BACKEND == "nccl": + all_gather_calls = get_profiling_event("_all_gather_base", prof) + else: + all_gather_calls = get_profiling_event("all_gather", prof) + self.assertNotEqual([], all_gather_calls) + + # Only do inference on one rank. If SyncBN did collective stats sync, + # this would hang/error. + model_inference = model.module + if self.rank == 0: + model_inference.eval() + with torch.autograd.profiler.profile() as prof: + for i in range(6): + inp = torch.randn(10, 2, 4, 4).cuda(rank) + out = model_inference(inp) + loss = out.sum() + loss.backward() + + # Ensure sync does not occur in eval() mode. 
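# Illustrative sketch (not from the patch itself): SyncBatchNorm layers
# all-gather batch statistics across ranks while in training mode (the
# collective the profiler checks around here look for) and skip the sync in
# eval mode. convert_sync_batchnorm swaps the ordinary BatchNorm layers of an
# existing model. Assumes an initialized process group and one GPU per rank.
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

rank = dist.get_rank()
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
model = nn.SyncBatchNorm.convert_sync_batchnorm(model).cuda(rank)
ddp = DDP(model, device_ids=[rank])
out = ddp(torch.randn(4, 3, 16, 16, device=rank))  # batch stats synced across ranks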
+ if BACKEND == "nccl": + all_gather_calls = get_profiling_event("_all_gather_base", prof) + else: + all_gather_calls = get_profiling_event("all_gather", prof) + self.assertEqual([], all_gather_calls) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_python_error_logged(self): + # Most python exceptions in DDP are raised during init before + # reducer is constructed, so we don't have a logger in those cases. + # However, the below is one example where a python error is thrown + # after reducer is constructed. + model = TwoLinLayerNet().cuda(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + expected_err = "must be callable" + with self.assertRaisesRegex(TypeError, expected_err): + model.register_comm_hook({}, {}) + + verify_ddp_error_logged(model, expected_err) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_static_graph_nested_types(self): + # Tests for static graph training when outputs are not just tensors + # but can be (nested) tuple, list, dict, etc. + rank = self.rank + torch.cuda.set_device(rank) + + class NestedOutputModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(100, 1, bias=False) + + def forward(self, inp, output_type): + if output_type == "tuple": + return ( + self.lin(inp), + ( + self.lin(inp), + self.lin(inp), + ), + ) + elif output_type == "list": + return [ + self.lin(inp), + [ + self.lin(inp), + self.lin(inp), + ], + ] + elif output_type == "dict": + return { + "a": self.lin(inp), + "b": { + "c": self.lin(inp), + }, + } + + def get_loss(model_output): + loss = 0.0 + if isinstance(model_output, torch.Tensor): + return model_output.sum() + elif isinstance(model_output, dict): + for value in model_output.values(): + loss += get_loss(value) + elif isinstance(model_output, (tuple, list)): + for x in model_output: + loss += get_loss(x) + else: + raise ValueError(f"Unknown model output type {type(model_output)}") + return loss + + model = NestedOutputModule().cuda(rank) + model_static_graph = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + ) + model_static_graph = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + static_graph=True, + ) + inp = torch.randn(10, 100) + type_mapping = { + "list": list, + "tuple": tuple, + "dict": dict, + } + for output_type in type_mapping.keys(): + for i in range(6): + out = model(inp, output_type=output_type) + loss = get_loss(out) + loss.backward() + self._model_step(model) + out_static = model_static_graph(inp, output_type=output_type) + self.assertTrue(isinstance(out_static, type_mapping[output_type])) + loss_static = get_loss(out_static) + loss_static.backward() + self._model_step(model_static_graph) + for (p, p_static) in zip( + model.parameters(), model_static_graph.parameters() + ): + self.assertEqual(p, p_static) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_returns_tensor_with_no_grad(self): + # Tests case where module returns tensor that does not require grad. 
+ torch.cuda.set_device(self.rank) + + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(10, 10, bias=False) + self.fc2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + x = self.fc2(F.relu(self.fc1(x))) + y = x.clone() + x = x.detach() + assert not x.requires_grad + return (x, y) + + model = MyModel().to(self.rank) + inp = torch.randn(1, 10, device=self.rank) + for (find_unused, static_graph) in itertools.product( + [True, False], [True, False] + ): + ddp = DistributedDataParallel( + model, + device_ids=[self.rank], + output_device=self.rank, + find_unused_parameters=find_unused, + static_graph=static_graph, + ) + for i in range(6): + out = ddp(inp) + self.assertFalse(out[0].requires_grad) + o = (out[0] + out[1]).sum() + o.backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_detect_ddp_is_actually_static(self): + class ToyModel(nn.Module): + def __init__(self): + super().__init__() + self.net1 = nn.Linear(10, 10, bias=False) + self.net2 = nn.Linear(10, 10) + + def forward(self, x, find_unused, dynamic): + if find_unused: + if dynamic: + return self.net2(self.net1(x)) + else: + return self.net2(x) + else: + return self.net2(self.net1(x)) + + # Set of unused parameters don't change across iterations + torch.cuda.set_device(self.rank) + model = ToyModel().cuda() + for find_unused in [True, False]: + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=find_unused, + ) + inp = torch.randn(1, 10, device="cuda") + for _ in range(6): + out = ddp(inp, find_unused=find_unused, dynamic=False) + loss = out.sum() + loss.backward() + self.assertTrue(ddp.reducer._ddp_graph_static()) + + # Set of unused parameters dynamically change + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + ) + inp = torch.randn(1, 10, device="cuda") + for i in range(6): + out = ddp(inp, find_unused=True, dynamic=i % 2 == 0) + loss = out.sum() + loss.backward() + self.assertFalse(ddp.reducer._ddp_graph_static()) + + def _test_ddp_new_tensor_in_fwd(self, static_graph): + # Test from https://github.com/pytorch/pytorch/issues/60733 + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(10, 10, bias=False) + self.fc2 = nn.Linear(10, 10, bias=False) + self.device = self.fc1.weight.device + + def __init_opt(self): + opt = torch.randn(1, 10, device=self.device) + return opt + + def forward(self, x, opt_1, opt_2, opt_nested): + x = F.relu(self.fc1(x)) + x = self.fc2(x) + if opt_1 is None: + opt_1 = self.__init_opt() + if opt_2 is None: + opt_2 = self.__init_opt() + if opt_nested is None or not torch.is_tensor(opt_nested): + opt_nested = self.__init_opt() + # Test multiple tensors as well as newly created tensors + # within a struct. 
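# Illustrative sketch (not from the patch itself): static_graph=True promises
# DDP that the set of used and unused parameters, and the autograd graph, stay
# identical every iteration, which lets it apply optimizations such as skipping
# the unused-parameter search after the first step; the "training graph has
# changed" errors tested earlier in this section fire when that promise is
# broken. Assumes an initialized process group and one GPU per rank.
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

rank = dist.get_rank()
ddp = DDP(torch.nn.Linear(10, 10).cuda(rank), device_ids=[rank], static_graph=True)
for _ in range(3):  # same graph each iteration, so the static-graph promise holds
    ddp(torch.randn(4, 10, device=rank)).sum().backward()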
+ return x, opt_1, opt_2, {"tensor": opt_nested} + + model = MyModel().to(self.rank) + for find_unused in [True, False]: + ddp = DistributedDataParallel( + model, + device_ids=[self.rank], + output_device=self.rank, + broadcast_buffers=False, + find_unused_parameters=find_unused, + static_graph=static_graph, + ) + + opt = [None for _ in range(3)] + for i in range(2): + ddp.zero_grad() + x = torch.randn(1, 10, device=self.rank) + out, opt[0], opt[1], opt[2] = ddp( + x, opt_1=opt[0], opt_2=opt[1], opt_nested=opt[2] + ) + for i in range(len(opt)): + if torch.is_tensor(opt[i]): + self.assertEqual(opt[i].grad_fn, None) + else: + self.assertEqual(opt[i]["tensor"].grad_fn, None) + out.mean().backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_new_tensor_in_fwd(self): + return self._test_ddp_new_tensor_in_fwd(static_graph=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_new_tensor_in_fwd_static_graph(self): + return self._test_ddp_new_tensor_in_fwd(static_graph=True) + + def _test_ddp_buffer_hook_allreduce(self, return_futures): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + def buffer_comm_hook(ddp, named_buffers): + buffers = [buffer for (_, buffer) in named_buffers.items()] + futs = [ + dist.all_reduce( + buffer, group=ddp.process_group, async_op=True + ).get_future() + for buffer in buffers + ] + if return_futures: + return futs + else: + torch.futures.collect_all(futs).wait() + + hook_pre_fwd = ( + torch.nn.parallel.distributed._BufferCommHookLocation.PRE_FORWARD + ) + hook_post_fwd = ( + torch.nn.parallel.distributed._BufferCommHookLocation.POST_FORWARD + ) + for hook_run_location in [ + hook_pre_fwd, + hook_post_fwd, + ]: + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp._register_buffer_comm_hook( + model_ddp, buffer_comm_hook, hook_run_location + ) + model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + broadcast_buffers=False, + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + loss_hook = model_ddp(inp).sum() + # Since buffer reduction is done pre-forward, simulate it for + # no hook case here. + # Simulate allreduce appropriately depending on hook location. + if hook_run_location == hook_pre_fwd: + model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) + for tensor in model_no_hook_buffers: + dist.all_reduce(tensor) + + loss_no_hook = model_ddp_no_hook(inp).sum() + if hook_run_location == hook_post_fwd: + model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) + for tensor in model_no_hook_buffers: + dist.all_reduce(tensor) + torch.cuda.synchronize() + + # if return_futures, they are only awaited on by DDP + # at the end of the backwards pass for maximum overlap. + if not return_futures: + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + loss_hook.backward() + loss_no_hook.backward() + # Note that when custom hooks return futures, this + # comparison is not expected to work when hook run location + # is pre-forward pass. 
This is because the hook does async + # communication and forward pass modifies the buffer without + # appropriate synchronization. Therefore, if returning + # futures from custom buffer hooks, it is advised to set + # hook run location to post forward. + if return_futures and hook_run_location == hook_post_fwd: + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_buffer_hook_allreduce_return_future(self): + self._test_ddp_buffer_hook_allreduce(return_futures=True) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_buffer_hook_allreduce(self): + self._test_ddp_buffer_hook_allreduce(return_futures=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_broadcast_buffer_via_hook(self): + # test that _distributed_broadcast_coalesced via registered hook is + # equivalent to DDP's default broadcast coalesced. + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + def buffer_comm_hook(ddp, named_buffers): + # named_buffers is a Dict[str, Tensor] representing a mapping + # from buffer name to buffer. + buffers = [buffer for (_, buffer) in named_buffers.items()] + ddp._default_broadcast_coalesced(buffers) + + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp._register_buffer_comm_hook(model_ddp, buffer_comm_hook) + model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + loss_hook = model_ddp(inp).sum() + loss_no_hook = model_ddp_no_hook(inp).sum() + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + loss_hook.backward() + loss_no_hook.backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_remove_autograd_hooks(self): + + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError() + + class MyModel(nn.Module): + def __init__(self, device): + super().__init__() + self.error = True + self.fc1 = nn.Linear(10, 10).cuda(device) + + def forward(self, inp): + if self.error: + return self.fc1(SimulateError.apply(inp)) + else: + return self.fc1(inp) + + + # Run with error to trigger backward pass that marks fc1 as being marked + # ready. If we don't remove autograd hooks before running below it would + # fail on the old autograd hook. + model = MyModel(self.rank) + input = torch.rand(10, 10, requires_grad=True).cuda(self.rank) + model_ddp1 = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + + with self.assertRaises(RuntimeError): + model_ddp1(input).sum().backward() + + # Remove autograd hooks on old instance. + model_ddp1._remove_autograd_hooks() + + # Try another DDP instance without error now. 
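+        # Re-wrapping the same underlying module is only safe because the
+        # autograd hooks of model_ddp1 were removed above; otherwise the stale
+        # hooks would still fire during the backward pass below.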
+ model.error = False + model_ddp2 = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp2(input).sum().backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @unittest.skip("Test is failing, tracking issue at https://github.com/pytorch/pytorch/issues/102751") + def test_ddp_has_finalized(self): + + @dataclass + class MyClass: + obj: torch.Tensor + + class MyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.rank = rank + self.fc1 = nn.Linear(1024, 1024).cuda(rank) + self.fc2 = nn.Linear(1024, 2 * 1024).cuda(rank) + + def forward(self, inp): + if self.rank == 0: + return self.fc1(inp), MyClass(self.fc2(inp)) + else: + return self.fc1(inp), self.fc2(inp) + + model = MyModel(self.rank) + input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=(1024 * 4 / 1024 / 1024), # One bucket per parameter. + ) + + if self.rank == 0: + out1, _ = ddp(input) + out1.sum().backward() + else: + out1, out2 = ddp(input) + (out1.sum() + out2.sum()).backward() + + if self.rank == 0: + with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"): + ddp._check_reducer_finalized() + + with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"): + ddp(input) + else: + ddp._check_reducer_finalized() + ddp(input) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", + "TORCH_NCCL_USE_COMM_NONBLOCKING only applies to NCCL" + ) + def test_nccl_init_abort(self): + """ + Tests that we can abort a NCCL communicator during initialization and + recover appropriately. + """ + # Reinitialize global process group with TORCH_NCCL_USE_COMM_NONBLOCKING=1 + os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1" + dist.destroy_process_group() + timeout = timedelta(seconds=1) + dist.init_process_group( + init_method=INIT_METHOD, + backend=BACKEND, + world_size=int(os.environ["WORLD_SIZE"]), + rank=self.rank, + timeout=timeout, + ) + + # Abort pg in background thread. + running = True + + def abort(device): + pg = _get_default_group() + while running: + pg._get_backend(torch.device(device))._shutdown() + time.sleep(1) + + if self.rank != 1: + import threading + t = threading.Thread(target=abort, args=(self.rank,)) + t.start() + with self.assertRaises(RuntimeError): + # First collective triggers initialization via ncclCommInitRank. + torch.distributed.barrier() + running = False + t.join() + + def _run_ddp_update_process_group(self, new_pg): + def get_num_torch_recompiles(): + guard_failures = torch._dynamo.utils.guard_failures + num_recompiles = [len(guard_failures[code]) for code in guard_failures] + return 0 if len(num_recompiles) == 0 else max(num_recompiles) + + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError() + + class MyModel(torch.nn.Module): + def __init__(self, device): + super().__init__() + # 4MB for multiple buckets. 
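+                # Each 1024x1024 float32 Linear below holds 1024 * 1024 * 4 bytes
+                # (about 4 MiB) of parameters, so with bucket_cap_mb=1 in the DDP
+                # constructor the three layers span several gradient buckets.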
+ self.fc1 = torch.nn.Linear(1024, 1024).cuda(device) + self.fc2 = torch.nn.Linear(1024, 1024).cuda(device) + self.fc3 = torch.nn.Linear(1024, 1024).cuda(device) + + def forward(self, inp, error): + if error: + return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp)))) + else: + return self.fc3(self.fc2(self.fc1(inp))) + + + input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + MyModel(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=1, + ) + model = torch.compile(ddp) + + def run_iteration(): + # Run regular iteration. + out = model(input, error=False) + out.sum().backward() + torch.cuda.synchronize() + + # Run with error. + with self.assertRaises(RuntimeError): + out = model(input, error=True) + out.sum().backward() + torch.cuda.synchronize() + + run_iteration() + assert 0 == get_num_torch_recompiles() + + if new_pg: + # Now reduce world_size and run iteration. + group_size_2 = dist.new_group(ranks=[0, 1]) + ddp._update_process_group(group_size_2) + if self.rank in [0, 1]: + run_iteration() + + # Increase the world size and run iteration. + group_size_3 = dist.new_group(ranks=[1, 2, 3]) + ddp._update_process_group(group_size_3) + if self.rank in [1, 2, 3]: + run_iteration() + + # Back to default size. + ddp._update_process_group(_get_default_group()) + run_iteration() + else: + # Create default pg of smaller size. + dist.destroy_process_group() + + if self.rank in [1, 2, 3]: + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=3, + rank=self.rank - 1, + timeout=timedelta(seconds=default_pg_timeout), + ) + ddp._update_process_group(_get_default_group()) + run_iteration() + dist.destroy_process_group() + + # Need a barrier here to ensure ranks 1, 2 and 3 are done. + self._barrier(wait_for=4) + + # Need to init pg again for "_barrier" to succeed. + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=4, + rank=self.rank, + timeout=timedelta(seconds=default_pg_timeout), + ) + + # Validate no more recompiles. 
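+        # get_num_torch_recompiles() (defined at the top of this helper) reads
+        # torch._dynamo.utils.guard_failures, so this asserts that swapping the
+        # process group did not trigger any dynamo recompiles.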
+ assert 0 == get_num_torch_recompiles() + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_new_group(self): + self._run_ddp_update_process_group(new_pg=True) + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_default_group(self): + self._run_ddp_update_process_group(new_pg=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_broadcast_buffer(self): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + class NetWithBuffers(nn.Module): + def __init__(self): + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + self.register_buffer("buffer", torch.randn(1, 2)) + + def forward(self, x): + return self.b(self.a(x)) + + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + if rank == 0: + model_ddp.module.buffer = model_ddp.module.buffer + 1 + loss = model_ddp(inp).sum() + loss.backward() + # Ensure all buffers are synchronized. + bufs = [ + torch.empty_like(model_ddp.module.buffer) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(bufs, model_ddp.module.buffer) + rank_0_buf = bufs[0] + for buf in bufs[1:]: + self.assertEqual(rank_0_buf, buf) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" and BACKEND != "gloo", + "Only Nccl & Gloo backend support DistributedDataParallel", + ) + def test_static_graph_multi_forward(self): + class Net(nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(10, 10) + self.relu = nn.ReLU() + + def forward(self, x): + return self.relu(self.lin(x)) + + torch.cuda.set_device(self.rank) + torch.manual_seed(42 << 1337 % (self.rank + 1)) + model = Net().cuda(self.rank) + local_model = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank], static_graph=True + ) + inp = torch.ones(2, 10, device="cuda") + for _ in range(3): + model.zero_grad() + local_model.zero_grad() + a = model(inp) + b = model(inp) + loss = a.sum() + b.sum() + loss.backward() + # Grads should be equal to a local model that ran through inp twice and averaged grads + if self.rank == 0: + inp_clone = inp.clone() + for _ in range(2): + a = local_model(inp_clone) + b = local_model(inp_clone) + loss = a.sum() + b.sum() + loss.backward() + + ws = dist.get_world_size() + for p in local_model.parameters(): + p.grad.data = p.grad / dist.get_world_size() + + for p_ddp, p_local in zip( + model.parameters(), + local_model.parameters() + ): + self.assertTrue( + torch.allclose( + p_ddp.grad, p_local.grad + ), + f"{p_ddp.grad} vs {p_local.grad}" + ) + + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" and BACKEND != "gloo", + "Only Nccl & Gloo backend support DistributedDataParallel", + ) + def test_sync_bn_logged(self): + model = BN_NET + rank = self.rank + # single gpu 
training setup + model_gpu = model.cuda(rank) + no_sync_bn = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model_gpu), + device_ids=[self.rank], + ) + ddp_logging_data = no_sync_bn._get_ddp_logging_data() + sync_bn_logged = ddp_logging_data.get("has_sync_bn", True) + self.assertFalse(sync_bn_logged) + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(model_gpu) + model_DDP = torch.nn.parallel.DistributedDataParallel( + model_DDP, + device_ids=[self.rank], + ) + ddp_logging_data = model_DDP._get_ddp_logging_data() + sync_bn_logged = ddp_logging_data.get("has_sync_bn", False) + self.assertTrue(sync_bn_logged) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_stateless_api_with_ddp(self): + class MockModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.l1 = torch.nn.Linear(1, 1) + buffer = torch.ones(1) + self.register_buffer("buffer", buffer) + + def forward(self, x): + return self.l1(x) + self.buffer + + device = self.rank + module = MockModule().to(device) + module = torch.nn.parallel.DistributedDataParallel( + module, device_ids=[device] + ) + x = torch.rand((1, 1)).to(device) + weight = torch.tensor([[1.0]], device=device, requires_grad=True) + bias = torch.tensor([0.0], device=device, requires_grad=True) + buffer = torch.tensor([0.0], device=device) + parameters = { + "module.l1.weight": weight, + "module.l1.bias": bias, + "module.buffer": buffer, + } + prev_weight = module.module.l1.weight.clone() + prev_buffer = module.module.buffer.clone() + + res = torch.func.functional_call(module, parameters, x) + self.assertEqual(x, res) + # check that the weight remain unmodified + cur_weight = module.module.l1.weight + cur_buffer = module.module.buffer + self.assertEqual(cur_weight, prev_weight) + self.assertEqual(cur_buffer, prev_buffer) + # run a backward pass and check the gradients + res.backward() + self.assertIsNotNone(weight.grad) + self.assertIsNotNone(bias.grad) + # Gradient was not calculated for the module stated and buffers + self.assertIsNone(buffer.grad) + self.assertIsNone(module.module.l1.weight.grad) + self.assertIsNone(module.module.l1.bias.grad) + self.assertIsNone(module.module.buffer.grad) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_forward_backward_hook(self): + class DummyTestModel(nn.Module): + def __init__(self): + super().__init__() + torch.manual_seed(0) + self.fc = nn.Linear(2, 2) + + def forward(self, x): + return self.fc(x) + + def relu_hook(module, input): + return nn.functional.relu(input[0]) + + def gelu_hook(module, _input, output): + return nn.functional.gelu(output) + + def celu_hook(module, _input, output): + return (nn.functional.celu(output[0]),) + + local_model = DummyTestModel() + ddp_model = DummyTestModel() + local_model.fc.register_forward_pre_hook(relu_hook) + local_model.fc.register_forward_hook(gelu_hook) + ddp_model.fc.register_forward_pre_hook(relu_hook) + ddp_model.fc.register_forward_hook(gelu_hook) + local_model.fc.register_backward_hook(celu_hook) + ddp_model.fc.register_backward_hook(celu_hook) + ddp_model = DistributedDataParallel( + ddp_model.to(self.rank), device_ids=[self.rank] + ) + input_data = torch.rand(5, 2) + output_local = local_model(input_data) + output_ddp = ddp_model(input_data.to(self.rank)) + self.assertEqual(output_local, output_ddp) + output_local.sum().backward() + 
output_ddp.sum().backward() + ddp_grads = [p.grad for p in ddp_model.parameters()] + self.assertEqual(ddp_grads[0], local_model.fc.weight.grad) + self.assertEqual(ddp_grads[1], local_model.fc.bias.grad) + + def _test_hook_pickling(self, hook, hook_state): + torch.manual_seed(0) + learning_rate = 0.01 + chkpt_file = tempfile.gettempdir() + "/checkpoint.pt" + rank = self.rank + + input = torch.randn(7, 1, device=rank) + target = torch.randn(7, 5, device=rank) + net = torch.nn.Linear(1, 5).to(rank) + ddp_model = DistributedDataParallel(copy.deepcopy(net), device_ids=[rank]) + dummy_ddp_model = DistributedDataParallel( + copy.deepcopy(net), device_ids=[rank] + ) + optimizer = torch.optim.SGD(ddp_model.parameters(), lr=learning_rate) + ddp_model.register_comm_hook(hook_state, hook) + ddp_model.train() + + for _ in range(10): + optimizer.zero_grad() + out = ddp_model(input) + loss = F.mse_loss(out, target) + loss.backward() + optimizer.step() + + state = { + "state_dict": ddp_model.state_dict(), + "comm_hook": hook, + "comm_hook_state": hook_state, + } + + if rank == 0: + with self.assertLogs("torch.distributed") as captured: + torch.save(state, chkpt_file) + + # Check that the logger has only one entry + self.assertEqual(len(captured.records), 1) + # Check that the logger has an expected entry + self.assertEqual( + captured.records[0].getMessage(), + "NOTE: Process group is not serializable and excluded from a saved state.", + ) + + dist.barrier() + map_location = {"cuda:%d" % 0: "cuda:%d" % rank} + with self.assertLogs("torch.distributed") as captured: + checkpoint = torch.load(chkpt_file, map_location=map_location) + + # Check that the logger has only one entry + self.assertEqual(len(captured.records), 1) + # Check that the logger has an expected entry + self.assertEqual( + captured.records[0].getMessage(), + "NOTE: Process group will be set to a default group (i.e. the world size).\ + If a different group is desired, please set `self.process_group` after PowerSGD state is loaded.", + ) + + dummy_ddp_model.load_state_dict(checkpoint["state_dict"]) + dummy_hook = checkpoint["comm_hook"] + dummy_hook_state = checkpoint["comm_hook_state"] + dummy_optimizer = torch.optim.SGD( + dummy_ddp_model.parameters(), lr=learning_rate + ) + + # Check that loaded function is correct + self.assertEqual(dummy_hook.__qualname__, hook.__qualname__) + + # Check that all slots' keys were restored correctly + self.assertEqual(hook_state.__slots__, dummy_hook_state.__slots__) + + # Check that all slots' attributes are restored correctly + # Excluding ``process_group`` and ``rng``. 
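+        # ``process_group`` is excluded because it is not serializable and is
+        # reset to the default group on load (verified right after this loop),
+        # while ``rng`` is compared separately below via ``get_state()``.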
+ for entry in dummy_hook_state.__slots__: + if entry != "process_group" and entry != "rng": + self.assertEqual( + getattr(dummy_hook_state, entry), getattr(hook_state, entry) + ) + + # Check that ``process_group`` was set to default + self.assertEqual(dummy_hook_state.process_group, _get_default_group()) + + # Check that a random state was restored properly: + # ``np.random.RandomState.get_state`` returns a tuple with entries: + # ``bit_generator`` - str, + # ``state.key`` - ndarray dtype[uint32], + # ``state.pos`` - int, + # ``has_gauss`` - int, + # ``gauss`` - float + # (refer to https://github.com/numpy/numpy/blob/266aad7478bc7fbcc55eea7f942a0d373b838396/numpy/random/mtrand.pyi) + # To make sure random state was restored properly, all entries should equal the original + for entry1, entry2 in zip( + hook_state.rng.get_state(), dummy_hook_state.rng.get_state() + ): + np.testing.assert_array_equal(entry1, entry2) + + dummy_ddp_model.register_comm_hook(dummy_hook_state, dummy_hook) + dummy_ddp_model.train() + + for _ in range(10): + optimizer.zero_grad() + dummy_optimizer.zero_grad() + out_origin = ddp_model(input) + out_dummy = dummy_ddp_model(input) + loss_origin = F.mse_loss(out_origin, target) + loss_dummy = F.mse_loss(out_dummy, target) + loss_origin.backward() + loss_dummy.backward() + optimizer.step() + dummy_optimizer.step() + + # Check that gradients after 10 epochs are the same + for orig_param, dummy_param in zip( + ddp_model.parameters(), dummy_ddp_model.parameters() + ): + self.assertEqual(orig_param.grad, dummy_param.grad) + + dist.barrier() + if rank == 0: + os.remove(chkpt_file) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + def test_ddp_hook_pickling_powerSGD(self): + + hook = powerSGD.powerSGD_hook + powersgd_state = powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=4, + ) + self._test_hook_pickling(hook, powersgd_state) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_device_mesh_initialization(self): + """ + Test DDP with device_mesh initialization. + """ + world_size = int(os.environ["WORLD_SIZE"]) + + from torch.distributed.device_mesh import init_device_mesh + device_mesh = init_device_mesh("cuda", (world_size,)) + + pg = _get_default_group() + + torch.cuda.set_device(self.rank) + model = TwoLinLayerNet().cuda() + ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_mesh=device_mesh) + self.assertEqual(ddp_model.device_mesh, device_mesh) + self.assertEqual(ddp_model.device_mesh.get_group(mesh_dim=0), pg) + + with self.assertRaisesRegex( + RuntimeError, "Cannot specify both process_group and device_mesh arguments." 
+ ): + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, process_group=pg, device_mesh=device_mesh + ) + + with self.assertRaisesRegex( + RuntimeError, "Only 1D device mesh is supported," + ): + device_mesh = init_device_mesh("cuda", (2, world_size // 2)) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, device_mesh=device_mesh + ) + + @skip_if_lt_x_gpu(2) + @require_world_size(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_compile_static_graph(self): + "Tests that DDP works with torch compile when static_graph=True" + model = torch.nn.Linear(10, 10).cuda(self.rank) + model_clone = copy.deepcopy(model) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + ddp_static = torch.nn.parallel.DistributedDataParallel( + model_clone, + device_ids=[self.rank], + static_graph=True + ) + ddp = torch.compile(ddp) + ddp_static = torch.compile(ddp_static) + input = torch.rand(10, 10).cuda(self.rank) + # verify output and gradient parity + for _ in range(6): + out_ddp = ddp(input).sum() + out_ddp_static = ddp_static(input).sum() + self.assertEqual(out_ddp, out_ddp_static) + out_ddp.backward() + out_ddp_static.backward() + for p1, p2 in zip(ddp.parameters(), ddp_static.parameters()): + self.assertEqual(p1.grad, p2.grad) + + +instantiate_parametrized_tests(DistributedTest._DistTestBase) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f76533c39e6fe4950cfdf36cf094ac83e58e2877 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py @@ -0,0 +1,64 @@ +from contextlib import contextmanager +from datetime import timedelta +from functools import ( + partial, + wraps, +) + +import torch.distributed as dist +import torch.distributed.distributed_c10d as c10d + +class MockProcessGroup(dist.ProcessGroup): + + def __init__(self, rank, world): + super().__init__(rank, world) + + def getBackendName(self): + return "mock_process_group" + +def create_mock_pg(prefix_store, rank, world_size, timeout): + return MockProcessGroup(rank, world_size) + +dist.Backend.register_backend('mock_process_group', create_mock_pg) + +def mock_init_dist(rank, world_size): + # !!! WARNING !!! + # Kids don't try this at home, this is a cute pile of hacks that + # depends on a small mountain of c10d internals + assert not dist.is_initialized() + store = dist.HashStore() + # Trick _store_based_barrier into believing everyone else already checked-in + # Zero is the group index + store.add(f"{c10d.STORE_BASED_BARRIER_PREFIX}:0", world_size - 1) + dist.init_process_group( + backend="mock_process_group", + rank=rank, + world_size=world_size, + store=store, + group_name="fake", + timeout=timedelta(seconds=1)) + +@contextmanager +def with_dist(rank=0, world_size=2): + """ + Context manager that initializer c10d with a fake process group. + """ + mock_init_dist(rank=rank, world_size=world_size) + try: + yield + finally: + dist.destroy_process_group() + +def with_fake_comms(func=None, rank=0, world_size=2): + """ + Function wrapper that inits a fake process group designed for testing. 
+    Right now only querying for world size is available.
+    """
+    if func is None:
+        return partial(with_fake_comms, rank=rank, world_size=world_size)
+
+    @wraps(func)
+    def wrapper(self, *args, **kwargs):
+        with with_dist(rank, world_size):
+            func(self, *args, **kwargs)
+    return wrapper
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c827913cbb34fabced6179f5113c3c056373813
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py
@@ -0,0 +1,144 @@
+import torch.distributed as dist
+
+from torch._C._distributed_c10d import (
+    _create_work_from_future,
+    AllgatherOptions,
+    AllreduceOptions,
+    BarrierOptions,
+    ReduceScatterOptions,
+    BroadcastOptions,
+    ScatterOptions,
+    AllToAllOptions
+)
+from torch.futures import Future
+
+from typing import List
+from torch import Tensor
+
+
+def ret_work(ret):
+    fut = Future()
+    fut.set_result(ret)
+    return _create_work_from_future(fut)
+
+
+class FakeProcessGroup(dist.ProcessGroup):
+    """
+    A fake process group (not related to FakeTensor) is a process group which
+    doesn't actually do any communication, it just hallucinates some
+    communication. You can run a single rank with a fake process group
+    without needing multiple processes (simulates per-rank behavior).
+
+    NOTE: This is not a real process group, and it would produce wrong results
+    for every collective. It should be used as a convenient tool when playing
+    with distributed code when you don't care about the actual data.
+    """
+    def __init__(self, rank, world_size):
+        super().__init__(rank, world_size)
+        self._rank = rank
+        self._world_size = world_size
+
+    def allreduce(self, tensor_list, opts=AllreduceOptions()):
+        return ret_work(tensor_list)
+
+    def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()):
+        return ret_work(tensor_list)
+
+    def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()):
+        # NOTE: in general it's not good form to try to make FakePG work with 'real data',
+        # but the reasoning here is that we want FakePG to work with DeviceMesh's init
+        # code that has data validation, which makes it worth the tradeoff.
+        # In general, users should use MTPG or a normal PG for cases where they care about
+        # real data from collectives.
+        for chunk in output_tensors[0]:
+            chunk.copy_(input_tensor[0])
+        return ret_work(output_tensors)
+
+    def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()):
+        return ret_work(output_tensor)
+
+    def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()):
+        # assume each rank has the same input tensor, so we just copy it to the results;
+        # since it's not a real allgather, we simply add this copying logic so that
+        # some simple validation works (i.e. calling allgather to see if each rank has
+        # the same tensor or not)
+        # NOTE: in general it's not good form to try to make FakePG work with 'real data',
+        # but the reasoning here is that we want FakePG to work with DeviceMesh's init
+        # code that has data validation, which makes it worth the tradeoff.
+ # In general user should use MTPG or normal PG for cases where they may care about + # real data from collectives + chunks = output_tensor.chunk(self._world_size) + for chunk in chunks: + chunk.copy_(input_tensor) + return ret_work(output_tensor) + + def _reduce_scatter_base(self, output_tensor, input_tensor, opts=ReduceScatterOptions()): + return ret_work(output_tensor) + + def barrier(self, opts=BarrierOptions()): + # it should be no-op for fake pg + pass + + def broadcast(self, tensors: List[Tensor], opts=BroadcastOptions()): + return ret_work(tensors) + + def scatter( + self, + output_tensors: List[Tensor], + input_tensors: List[List[Tensor]], + opts=ScatterOptions(), + ): + return ret_work(output_tensors) + + def alltoall( + self, + output_tensors: List[Tensor], + input_tensors: List[Tensor], + opts=AllToAllOptions(), + ): + return ret_work(output_tensors) + + def alltoall_base( + self, + output_tensor: Tensor, + input_tensor: Tensor, + output_split_sizes: List[int], + input_split_sizes: List[int], + opts=AllToAllOptions(), + ): + return ret_work(output_tensor) + + def send( + self, + tensors: List[Tensor], + dstRank: int, + tag: int, + ): + return ret_work(None) + + def recv( + self, + tensors: List[Tensor], + srcRank: int, + tag: int, + ): + return ret_work(tensors) + + def getBackendName(self): + return "fake" + + def __repr__(self): + return f"FakePG world_size:{self._world_size} rank:{self._rank}" + + +class FakeStore(dist.Store): + """ + A fake store is a fake Key-Value store simply for initialization usage + the of fake process group, one can either use FakeStore or HashStore. + """ + pass + +def _create_fake_pg(prefix_store, rank, world_size, timeout): + return FakeProcessGroup(rank, world_size) + +dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda']) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py new file mode 100644 index 0000000000000000000000000000000000000000..8ea8f1a8cd39be283880d61d2cf78dbbaa3fb039 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py @@ -0,0 +1,473 @@ +import sys +import threading +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Union +from functools import partial, reduce + +import torch +import torch.distributed as dist +import weakref +from torch._C._distributed_c10d import ( + _create_work_from_future, + AllgatherOptions, + AllreduceOptions, + AllToAllOptions, + BarrierOptions, + BroadcastOptions, + ReduceScatterOptions, + ScatterOptions, + Store, + ReduceOp, +) +from torch.distributed.distributed_c10d import _CollOp, _store_based_barrier, P2POp +from torch.futures import Future +from torch.utils import _pytree as pytree + +""" +TODO: +Lots of missing collectives. +Collectives validation. +Make timeout robust by making collectives respect the test deadline. +Make tests robust by making collectives interruptible. +We need some synchronization around cleanup to ensure that timedout ranks don't cause spurious failures. 
+ +""" + + +def flatten_list(lst): + return pytree.tree_leaves(lst) + + +def ret_work(ret): + fut = Future() + fut.set_result(ret) + return _create_work_from_future(fut) + +def binop_reduce(tensors, op): + res = op(torch.stack(tensors), dim=0) + if isinstance(res, torch.Tensor): + return res + # min/max return a namedtuple + return res.values + +def bitwise_reduce(tensors, op): + return reduce(op, tensors) + +_reduce_ops = { + ReduceOp.SUM: partial(binop_reduce, op=torch.sum), + ReduceOp.AVG: partial(binop_reduce, op=torch.mean), + ReduceOp.PRODUCT: partial(binop_reduce, op=torch.prod), + ReduceOp.MIN: partial(binop_reduce, op=torch.min), + ReduceOp.MAX: partial(binop_reduce, op=torch.max), + ReduceOp.BAND: partial(bitwise_reduce, op=torch.bitwise_and), + ReduceOp.BOR: partial(bitwise_reduce, op=torch.bitwise_or), + ReduceOp.BXOR: partial(bitwise_reduce, op=torch.bitwise_xor), +} + +class AllToAll: + @torch.no_grad() + def work(self, data): + world_size = len(data) + for dest_rank in range(world_size): + output_tensor_list, _ = data[dest_rank] + for src_rank in range(world_size): + _, input_tensor_list = data[src_rank] + output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank]) + +class AllReduce: + def __init__(self, op): + if op.op not in _reduce_ops: + raise NotImplementedError( + f"AllReduce op {op.op} not supported on multithreaded pg for now." + ) + self.op = op.op + + @torch.no_grad() + def work(self, data): + for i in range(len(data[0])): + tensors = [] + # use rank0 as the device for sum + rank_0_device = data[0][i].device + # collect all data to the list and make them + # all on rank 0 device + for src_rank in range(0, len(data)): + tensors.append(data[src_rank][i].to(rank_0_device)) + + # now mimic reduce across all ranks + res = _reduce_ops[self.op](tensors) + + # copy all the reduced value to each rank + for src_rank in range(len(data)): + data[src_rank][i].copy_(res.to(data[src_rank][i].device)) + + +class AllGather: + @torch.no_grad() + def work(self, data): + for src_rank in range(len(data)): + in_tensor_list = data[src_rank][1] + # Can't handle all_gather with multiple tensors + assert len(in_tensor_list) == 1 + src_tensor = in_tensor_list[0] + + for dest in data: + dest_tensor = dest[0][0][src_rank] + dest_tensor.copy_(src_tensor) + + +class Scatter: + def __init__(self, src): + self.src = src + + @torch.no_grad() + def work(self, data): + src_in_tensor_list = data[self.src][1] + # Can't handle scatter with multiple input tensor list + assert len(src_in_tensor_list) == 1 + src_in_tensors = src_in_tensor_list[0] + + for rank, each_rank_data in enumerate(data): + out_tensor_list = each_rank_data[0] + # Can't handle scatter with multiple output tensor + assert len(out_tensor_list) == 1 + dest_tensor = out_tensor_list[0] + dest_tensor.copy_(src_in_tensors[rank]) + + +class Gather: + def __init__(self, dst): + self.dst = dst + + @torch.no_grad() + def work(self, data): + # Can't handle gather with multiple tensor lists + assert len(data[self.dst][0]) == 1 + out_tensor_list = data[self.dst][0][0] + for rank, each_rank_data in enumerate(data): + src_in_tensor_list = each_rank_data[1] + # Can't handle gather with multiple tensor lists + assert len(src_in_tensor_list) == 1 + dest_tensor = out_tensor_list[rank] + dest_tensor.copy_(src_in_tensor_list[0]) + +class ReduceScatter: + def __init__(self, op): + if op != dist.ReduceOp.SUM: + raise NotImplementedError("ReduceScatter only supports SUM on threaded pg for now.") + self.op = op + + @torch.no_grad() + def work(self, 
data): + start_reduction = [False for _ in range(len(data))] + for each_rank_data in data: + # Can't handle reduce_scatter with multiple scatter list + assert len(each_rank_data[1]) == 1 + to_scatter = each_rank_data[1][0] + for i in range(len(to_scatter)): + dest_tensor_on_rank_i = data[i][0] + # Can't handle reduce_scatter with multiple output tensor + assert len(dest_tensor_on_rank_i) == 1 + dst_tensor_device = dest_tensor_on_rank_i[0].device + if not start_reduction[i]: + dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device)) + start_reduction[i] = True + else: + dest_tensor_on_rank_i[0].add_(to_scatter[i].to(dst_tensor_device)) + +class Broadcast: + def __init__(self, src): + self.src = src + + @torch.no_grad() + def work(self, data): + in_tensor_list = flatten_list(data[self.src]) + for i in range(len(data)): + out_tensor_list = flatten_list(data[i]) + for j in range(len(in_tensor_list)): + out_tensor_list[j].copy_(in_tensor_list[j]) + + +class Collective: + def __init__(self, world_size, collective, pg): + self._world_size = world_size + self._collective = collective + + self._start_cond = threading.Condition() + self._done_cond = threading.Condition() + + self._data = [None] * world_size + self._count = 0 + self._done = False + + self._pg = pg + + def join(self, rank, data): + with self._start_cond: + self._data[rank] = data + self._count += 1 + + # notify rank 0 + if self._count == self._world_size: + if rank > 0: + self._start_cond.notify() + + if rank == 0: + self._start_cond.wait_for( + lambda: self._count == self._world_size or self._pg._terminate.is_set() + ) + # SystemExit is not a subclass of Exception but BaseException + # and can be distinguished from normal exception raised from program errors + # so that we can hide it from the exception queue + if self._pg._terminate.is_set(): + sys.exit("Test termination event occurs.") + + with self._done_cond: + # wait for rank 0 to finish + if rank > 0: + self._done_cond.wait_for(lambda: self._done or self._pg._terminate.is_set()) + if self._pg._terminate.is_set(): + sys.exit("Test termination event occurs.") + else: + # copy data around + self._collective.work(self._data) + self._done = True + self._done_cond.notify_all() + return ret_work(data) + + +class ProcessLocalGroup(dist.ProcessGroup): + _coll_lock = threading.Lock() + _cur_coll_on_pgs = {} + + _terminate = threading.Event() + + @classmethod + def _start_coll(cls, collective, pg): + with cls._coll_lock: + # pg_name is unique, we use that to record the mapping between pg and collective + if pg.pg_name not in cls._cur_coll_on_pgs: + cls._cur_coll_on_pgs[pg.pg_name] = Collective(pg.size(), collective, cls) + return cls._cur_coll_on_pgs[pg.pg_name] + + @classmethod + def _end_coll(cls, collective, pg): + # This is racily called by all ranks, so only one will work + with cls._coll_lock: + if pg.pg_name in cls._cur_coll_on_pgs and cls._cur_coll_on_pgs[pg.pg_name] == collective: + cls._cur_coll_on_pgs.pop(pg.pg_name) + + @classmethod + def exception_handle(cls, exc): + cls._terminate.set() + for coll in cls._cur_coll_on_pgs.values(): + with coll._start_cond: + coll._start_cond.notify() + with coll._done_cond: + coll._done_cond.notify_all() + + @classmethod + def reset(cls): + with cls._coll_lock: + cls._cur_coll_on_pgs = {} + cls._terminate.clear() + + def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()): + coll = ProcessLocalGroup._start_coll(AllToAll(), self) + res = coll.join(self._rank, (output_tensor_list, input_tensor_list)) + 
ProcessLocalGroup._end_coll(coll, self) + return res + + def allreduce(self, tensor_list, opts=AllreduceOptions()): + coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()): + coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def barrier(self, opts=BarrierOptions()): + return self.allreduce(tensor_list=[torch.ones(1)]) + + def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()): + coll = ProcessLocalGroup._start_coll(AllGather(), self) + res = coll.join(self._rank, (output_tensors, input_tensor)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()): + tensor_list = list(torch.chunk(output_tensor, self._world_size)) + return self.allgather([tensor_list], [input_tensor], opts) + + def broadcast(self, tensor_list, opts=BroadcastOptions()): + coll = ProcessLocalGroup._start_coll(Broadcast(opts.rootRank), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def scatter(self, output_tensors, input_tensors, opts=ScatterOptions()): + coll = ProcessLocalGroup._start_coll(Scatter(opts.rootRank), self) + res = coll.join(self._rank, (output_tensors, input_tensors)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def gather(self, output_tensors, input_tensors, opts=ScatterOptions()): + coll = ProcessLocalGroup._start_coll(Gather(opts.rootRank), self) + res = coll.join(self._rank, (output_tensors, input_tensors)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()): + coll = ProcessLocalGroup._start_coll(ReduceScatter(opts.reduceOp), self) + res = coll.join(self._rank, (output_tensor, scatter_list)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def _reduce_scatter_base(self, output_tensor, input_tensor, opts=AllgatherOptions()): + tensor_list = list(torch.chunk(input_tensor, self._world_size)) + return self.reduce_scatter([output_tensor], [tensor_list], opts) + + def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list): + res = None + for o_t, i_t in zip(output_tensor_list, input_tensor_list): + res = self._allgather_base(o_t, i_t) + return res + + def __init__(self, rank, world_size): + super().__init__(rank, world_size) + self._rank = rank + self._world_size = world_size + world = dist.distributed_c10d._world + if isinstance(world, ThreadLocalWorld): + world = world._get_world() + self._world = weakref.ref(world) + self._ctx = torch.autograd.set_multithreading_enabled(False) + + def size(self): + return self._world_size + + @property + def pg_name(self): + """ + return the global registered name of the current pg in the world + """ + return self._world().pg_names[self] + + def getBackendName(self): + return "threaded" + + def __repr__(self): + return f"ThreadedPG world_size:{self._world_size} rank:{self._rank}" + + +def _create_threaded_pg(prefix_store, rank, world_size, timeout): + pg = ProcessLocalGroup(rank, world_size) + # https://github.com/pytorch/pytorch/pull/103033 changed store based barrier to optional + # When device mesh involves sub groups while store based barrier is not enabled in c10d, + # even 
though threaded pg actual collectives are assumed to be single threaded, + # different threads may be initializing different groups, + # leading to race conditions. + # For example, if we have a mesh of [[0, 1], [2, 3]], the sub groups + # (dim 0 and 1) would be initialized in different threads independently. + # In this case we can no longer rely on class or global variables + # but have to rely on store based barrier to make sure each group + # is ready separately before we can invoke collectives in any of the groups. + + # the prefix store is already per group so we pass an empty name here + _store_based_barrier(rank, prefix_store, "", world_size, timeout) + return pg + + +dist.Backend.register_backend("threaded", _create_threaded_pg) + + +@dataclass +class WorldData: + default_pg: dist.ProcessGroup + pg_map: Dict[dist.ProcessGroup, Tuple[str, Optional[Store]]] + pg_names: Dict[dist.ProcessGroup, str] + pg_group_ranks: Dict[dist.ProcessGroup, Dict[int, int]] + pg_backend_config: Dict[dist.ProcessGroup, str] + group_count: int + tags_to_pg: Dict[str, List[dist.ProcessGroup]] + pg_to_tag: Dict[dist.ProcessGroup, str] + pg_coalesce_state: Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]] + pg_default_device: Dict[dist.ProcessGroup, torch.device] + + +class ThreadLocalWorld: + _world = threading.local() + + def _get_world(self) -> WorldData: + if not hasattr(ThreadLocalWorld._world, "world"): + ThreadLocalWorld._world.world = WorldData(None, {}, {}, {}, {}, 0, {}, {}, {}, {}) + return ThreadLocalWorld._world.world + + @property + def default_pg(self): + return self._get_world().default_pg + + @default_pg.setter + def default_pg(self, value): + self._get_world().default_pg = value + + @property + def pg_map(self): + return self._get_world().pg_map + + @property + def pg_names(self): + return self._get_world().pg_names + + @property + def pg_group_ranks(self): + return self._get_world().pg_group_ranks + + @property + def pg_backend_config(self): + return self._get_world().pg_backend_config + + @property + def group_count(self) -> int: + return self._get_world().group_count + + @group_count.setter + def group_count(self, value): + self._get_world().group_count = value + + @property + def tags_to_pg(self): + return self._get_world().tags_to_pg + + @property + def pg_to_tag(self): + return self._get_world().pg_to_tag + + @property + def pg_coalesce_state(self) -> Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]: + return self._get_world().pg_coalesce_state + + @property + def pg_default_device(self) -> Dict[dist.ProcessGroup, torch.device]: + return self._get_world().pg_default_device + + +_old_pg_world = None +_ctx_manager = None + + +def _install_threaded_pg(): + global _old_pg_world + global _ctx_manager + _old_pg_world = dist.distributed_c10d._world + dist.distributed_c10d._world = ThreadLocalWorld() + _ctx_manager = torch.autograd.set_multithreading_enabled(False) + + return dist.distributed_c10d._world + + +def _uninstall_threaded_pg(): + dist.distributed_c10d._world = _old_pg_world diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd14a1993163fe38d77800ac91e21ca35a109b52 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a306a5cf48395bb5f270bba8dcb5693b9488c1f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff87e8533d045b27fb8800571f98969cd327472f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4d9f1d9b53ddc4f9bed44e9170a2fdd9d010be9c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py @@ -0,0 +1,733 @@ +#!/usr/bin/python3 +import enum +from typing import Tuple + +import torch +import torch.distributed.rpc as rpc +import torch.testing._internal.dist_utils as dist_utils +from torch import Tensor, nn +from torch._jit_internal import Future +from torch.distributed.nn import RemoteModule +from torch.distributed.nn.api.remote_module import _REMOTE_MODULE_PICKLED_ATTRIBUTES +from torch.distributed.nn.api.remote_module import _RemoteModule +from torch.testing._internal.common_distributed import skip_if_lt_x_gpu +from torch.testing._internal.common_utils import TemporaryFileName +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +_PARAM_VAL = torch.nn.Parameter(torch.ones(1)) + + +# RPC handler for querying the device on the destination worker. +def remote_device(module_rref): + for param in module_rref.local_value().parameters(): + return param.device + + +# RPC handler for querying __dict__ on the destination worker. +def remote_module_attributes(remote_module): + return remote_module.__dict__ + + +# RPC handler for running forward on the destination worker. +def remote_forward(remote_module, args): + return remote_module.forward(*args) + +# RPC handler for running forward_async on the destination worker. 
+def remote_forward_async(remote_module, args): + # Since future cannot be pickled and sent over the RPC layer, + # have to wait and behave just like ``forward_sync``. + return remote_module.forward_async(*args).wait() + +# RPC handler for getting training mode on the destination worker. +def get_remote_training_arg(module_rref): + return module_rref.local_value().training + +class ModuleCreationMode(enum.Enum): + MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface" + MODULE_CTOR = "module_ctor" + + +@torch.jit.interface +class MyModuleInterface: + def forward( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Tuple[str, int, Tensor]: + # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well + pass + + +@torch.jit.interface +class RemoteMyModuleInterface: + def forward( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Tuple[str, int, Tensor]: + # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well + pass + + def forward_async( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Future[Tuple[str, int, Tensor]]: + pass + + +class MyModule(nn.Module): + def __init__(self, first_arg, first_kwarg=-1): + super().__init__() + self.param1 = _PARAM_VAL + + def forward( + self, tensor: Tensor, number: int, word: str = "default" + ) -> Tuple[str, int, Tensor]: + return word, number, tensor + + +class BadModule: + def __init__(self, first_arg, first_kwarg=-1): + pass + + +def create_scripted_module(first_arg, first_kwarg=-1): + module = MyModule(first_arg, first_kwarg=first_kwarg) + scripted_module = torch.jit.script(module) + return scripted_module + + +# Common utils for both CPU and CUDA test suites +class CommonRemoteModuleTest(RpcAgentTestFixture): + @property + def world_size(self): # Override setting in RpcAgentTestFixture + return 2 + + @staticmethod + def _create_remote_module_iter(remote_device, modes=None): + if modes is None: + modes = ModuleCreationMode.__members__.values() + + args = (1,) + kwargs = dict(first_kwarg=2) + + if ModuleCreationMode.MODULE_CTOR in modes: + remote_module = RemoteModule(remote_device, MyModule, args, kwargs) + yield remote_module + + if ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes: + remote_module = _RemoteModule( + remote_device, + create_scripted_module, + args, + kwargs, + _module_interface_cls=MyModuleInterface, + ) + scripted_remote_module = torch.jit.script(remote_module) + yield scripted_remote_module + + +class RemoteModuleTest(CommonRemoteModuleTest): + @dist_utils.dist_init + def test_bad_module(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + remote_device = f"{dst_worker_name}/cpu" + args = (1,) + kwargs = dict(first_kwarg=2) + + with self.assertRaisesRegex( + ValueError, + r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of ,", + ): + RemoteModule(remote_device, BadModule, args, kwargs).forward() + + with self.assertRaisesRegex( + ValueError, + r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of ,", + ): + RemoteModule(remote_device, BadModule, args, kwargs).forward() + + + @dist_utils.dist_init + def test_forward_async(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + args = (torch.ones(1), 2, "3") + for remote_module in self._create_remote_module_iter(dst_worker_name): + ret_fut = remote_module.forward_async(*args) + ret = ret_fut.wait() + self.assertEqual(ret, tuple(reversed(args))) + + @dist_utils.dist_init 
+ def test_forward_async_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + scripted_remote_module = next( + self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ) + ) + + @torch.jit.script + def run_forward_async(scripted_remote_module: RemoteMyModuleInterface): + ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3") + ret = ret_fut.wait() + return ret + + ret = run_forward_async(scripted_remote_module) + + self.assertEqual(ret, ("3", 2, torch.ones(1))) + + @dist_utils.dist_init + def test_forward_sync(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + args = (torch.ones(1), 2, "3") + for remote_module in self._create_remote_module_iter(dst_worker_name): + ret = remote_module.forward(*args) + self.assertEqual(ret, tuple(reversed(args))) + + @dist_utils.dist_init + def test_forward_sync_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + scripted_remote_module = next( + self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ) + ) + + @torch.jit.script + def run_forward(scripted_remote_module: MyModuleInterface): + ret = scripted_remote_module.forward(torch.ones(1), 2, "3") + return ret + + ret = run_forward(scripted_remote_module) + + self.assertEqual(ret, ("3", 2, torch.ones(1))) + + @dist_utils.dist_init + def test_forward_with_kwargs(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + args = (torch.ones(1), 2) + kwargs = dict(word="3") + # Only test Python nn.Module, because script module methods don't support taking kwargs. + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + ret_fut = remote_module.forward_async(*args, **kwargs) + ret = ret_fut.wait() + self.assertEqual(ret, tuple(reversed(args + ("3",)))) + + ret = remote_module.forward(*args, **kwargs) + self.assertEqual(ret, tuple(reversed(args + ("3",)))) + + @dist_utils.dist_init + def test_remote_parameters(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # Only test Python nn.Module, because script module methods don't support ``remote_parameters``. + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + param_rrefs = remote_module.remote_parameters() + self.assertEqual(len(param_rrefs), 1) + self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL)) + + @dist_utils.dist_init + def test_get_module_rref(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # Only test Python nn.Module, because script module methods don't support ``get_module_rref``. 
+ for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + rref = remote_module.get_module_rref() + self.assertEqual(rref, remote_module.module_rref) + for param in rref.to_here().parameters(): + self.assertTrue(torch.equal(param, _PARAM_VAL)) + + @dist_utils.dist_init + def test_train_eval(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + remote_module.train() + ret1 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),)) + self.assertEqual(ret1, True) + + remote_module.eval() + ret2 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),)) + self.assertEqual(ret2, False) + + @dist_utils.dist_init + def test_unsupported_methods(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + with self.assertRaisesRegex( + ValueError, r"Method ``register_buffer`` not supported for RemoteModule" + ): + remote_module.register_buffer("buffer", torch.ones(5)) + with self.assertRaisesRegex( + ValueError, + r"Method ``register_parameter`` not supported for RemoteModule", + ): + remote_module.register_parameter( + "param", torch.nn.Parameter(torch.ones(1)) + ) + with self.assertRaisesRegex( + ValueError, r"Method ``add_module`` not supported for RemoteModule" + ): + remote_module.add_module("empty", None) + + with self.assertRaisesRegex( + ValueError, r"Method ``apply`` not supported for RemoteModule" + ): + fn = torch.rand((3, 3), requires_grad=False) + remote_module.apply(fn) + + with self.assertRaisesRegex( + ValueError, r"Method ``cuda`` not supported for RemoteModule" + ): + remote_module.cuda() + with self.assertRaisesRegex( + ValueError, r"Method ``cpu`` not supported for RemoteModule" + ): + remote_module.cpu() + with self.assertRaisesRegex( + ValueError, r"Method ``type`` not supported for RemoteModule" + ): + remote_module.type(torch.FloatTensor) + with self.assertRaisesRegex( + ValueError, r"Method ``float`` not supported for RemoteModule" + ): + remote_module.float() + with self.assertRaisesRegex( + ValueError, r"Method ``double`` not supported for RemoteModule" + ): + remote_module.double() + with self.assertRaisesRegex( + ValueError, r"Method ``bfloat16`` not supported for RemoteModule" + ): + remote_module.bfloat16() + with self.assertRaisesRegex( + ValueError, r"Method ``to`` not supported for RemoteModule" + ): + remote_module.to("cpu", dtype=torch.int32) + + def hook(module, grad_input, grad_output): + pass + + with self.assertRaisesRegex( + ValueError, + r"Method ``register_backward_hook`` not supported for RemoteModule", + ): + remote_module.register_backward_hook(hook) + with self.assertRaisesRegex( + ValueError, + r"Method ``register_forward_pre_hook`` not supported for RemoteModule", + ): + remote_module.register_forward_pre_hook(hook) + with self.assertRaisesRegex( + ValueError, + r"Method ``register_forward_hook`` not supported for RemoteModule", + ): + remote_module.register_forward_hook(hook) + + with self.assertRaisesRegex( + ValueError, r"Method ``state_dict`` not supported for RemoteModule" + ): + remote_module.state_dict() + with self.assertRaisesRegex( + ValueError, 
r"Method ``load_state_dict`` not supported for RemoteModule" + ): + remote_module.load_state_dict({}) + + with self.assertRaisesRegex( + ValueError, + r"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead.", + ): + remote_module.parameters() + with self.assertRaisesRegex( + ValueError, + r"Method ``named_parameters`` not supported for RemoteModule", + ): + remote_module.named_parameters() + with self.assertRaisesRegex( + ValueError, r"Method ``buffers`` not supported for RemoteModule" + ): + remote_module.buffers() + with self.assertRaisesRegex( + ValueError, r"Method ``named_buffers`` not supported for RemoteModule" + ): + remote_module.named_buffers() + with self.assertRaisesRegex( + ValueError, r"Method ``children`` not supported for RemoteModule" + ): + remote_module.children() + with self.assertRaisesRegex( + ValueError, r"Method ``named_children`` not supported for RemoteModule" + ): + remote_module.named_children() + with self.assertRaisesRegex( + ValueError, r"Method ``modules`` not supported for RemoteModule" + ): + remote_module.modules() + with self.assertRaisesRegex( + ValueError, r"Method ``named_modules`` not supported for RemoteModule" + ): + remote_module.named_modules() + + with self.assertRaisesRegex( + ValueError, r"Method ``requires_grad_`` not supported for RemoteModule" + ): + remote_module.requires_grad_() + with self.assertRaisesRegex( + ValueError, r"Method ``zero_grad`` not supported for RemoteModule" + ): + remote_module.zero_grad() + with self.assertRaisesRegex( + ValueError, r"Method ``share_memory`` not supported for RemoteModule" + ): + remote_module.share_memory() + with self.assertRaisesRegex( + ValueError, r"Method ``extra_repr`` not supported for RemoteModule" + ): + remote_module.extra_repr() + + @dist_utils.dist_init + def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # If a new attribute is added to this RemoteModule after the initialization, + # and it will be sent over the wire by RPC, + # this new field will not be pickled, because it's not specified in _REMOTE_MODULE_PICKLED_ATTRIBUTES. + # Note that adding a new attribute out of constructor should rarely happen. + # If a new attribute is added to RemoteModule constructor, + # there is a sanity check to enforce developers to add this attribute to either + # _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING. + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + new_attr_name = "new_attr" + setattr(remote_module, new_attr_name, 1) + + attrs = rpc.rpc_sync( + dst_worker_name, remote_module_attributes, (remote_module,) + ) + self.assertNotIn(new_attr_name, attrs) + + @dist_utils.dist_init + def test_remote_module_py_pickle_not_supported(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + with TemporaryFileName() as fname: + with self.assertRaisesRegex( + RuntimeError, + "Cannot pickle RemoteModule in python pickler. 
RemoteModule can only be pickled when using RPC", + ): + torch.save(remote_module, fname) + + @dist_utils.dist_init + def test_remote_module_py_pickle_not_supported_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + for remote_module in self._create_remote_module_iter( + dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ): + with TemporaryFileName() as fname: + with self.assertRaisesRegex(torch.jit.Error, "can only be pickled when using RPC"): + torch.save(remote_module, fname) + + +class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest): + @property + def world_size(self): # Override setting in CommonRemoteModuleTest + return 3 + + @dist_utils.dist_init + def test_send_remote_module_over_the_wire(self): + if self.rank != 0: + return + dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) + + # Unpickled attributes include both the inherent attributes of RemoteModule + # (not inherited from the superclass) and two installed methods. + expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES) + expected_unpickled_attrs.append("forward_async") + expected_unpickled_attrs.append("forward") + + # Create a remote module on worker1 and then pass it to worker2 over the RPC layer. + for remote_module in self._create_remote_module_iter( + dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + # Test querying some simple attributes from worker2. + attrs = rpc.rpc_sync( + dst_worker2_name, remote_module_attributes, (remote_module,) + ) + self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs) + self.assertEqual(attrs["on"], "worker1") + self.assertEqual(attrs["device"], "cpu") + self.assertFalse(attrs["is_device_map_set"]) + self.assertFalse(attrs["is_scriptable"]) + + # Test the installed methods on worker1's can be initiated by worker2 over RPC layer. + # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``forward_async``, + # not have another worker to initiate forward over the RPC layer. + args = (torch.ones(1), 2, "3") + ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args)) + self.assertEqual(ret1, tuple(reversed(args))) + ret2 = rpc.rpc_sync( + dst_worker2_name, remote_forward_async, (remote_module, args) + ) + self.assertEqual(ret2, tuple(reversed(args))) + + @dist_utils.dist_init + def test_send_remote_module_over_the_wire_script_not_supported(self): + if self.rank != 0: + return + dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) + + # Unpickled attributes include both the inherent attributes of RemoteModule + # (not inherited from the superclass) and two installed methods. + expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES) + expected_unpickled_attrs.append("forward_async") + expected_unpickled_attrs.append("forward") + + with self.assertRaisesRegex( + RuntimeError, "Passing a script RemoteModule over RPC is not supported." + ): + # Create a remote module on worker1 and then pass it to worker2 over the RPC layer. + for remote_module in self._create_remote_module_iter( + dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE] + ): + # Test querying some simple attributes from worker2. 
+ attrs = rpc.rpc_sync( + dst_worker2_name, remote_module_attributes, (remote_module,) + ) + + @dist_utils.dist_init + def test_create_remote_module_from_module_rref(self): + if self.rank != 0: + return + dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size) + + # Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer. + for remote_module in self._create_remote_module_iter( + dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR] + ): + remote_module2 = rpc.rpc_sync( + dst_worker2_name, + RemoteModule.init_from_module_rref, + (dst_worker2_name, remote_module.get_module_rref()), + ) + + args = (torch.ones(1), 2, "3") + ret1 = rpc.rpc_sync( + dst_worker1_name, remote_forward, (remote_module, args) + ) + ret2 = rpc.rpc_sync( + dst_worker2_name, remote_forward, (remote_module2, args) + ) + self.assertEqual(ret2, ret2) + + +class CudaRemoteModuleTest(CommonRemoteModuleTest): + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_valid_device(self): + if self.rank != 0: + return + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = dist_utils.worker_name(dst_rank) + + for remote_module in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR] + ): + device = rpc.rpc_sync( + dst_worker_name, remote_device, (remote_module.module_rref,) + ) + self.assertEqual(device.type, "cuda") + self.assertEqual(device.index, 0) + + # Test rank works as well. + for remote_module in self._create_remote_module_iter( + f"rank:{dst_rank}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR] + ): + device = rpc.rpc_sync( + dst_worker_name, remote_device, (remote_module.module_rref,) + ) + self.assertEqual(device.type, "cuda") + self.assertEqual(device.index, 0) + + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_invalid_devices(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + with self.assertRaisesRegex( + RuntimeError, + r"Expected one of .+ device type at start of device string", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/foo", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + RuntimeError, r"CUDA error: invalid device ordinal" + ): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:100", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex(RuntimeError, r"Invalid device string: 'cpu2'"): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/cpu2", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex(RuntimeError, r"Device string must not be empty"): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + ValueError, + r"Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '/'", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0/cuda:1", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + ValueError, + r"Could not parse remote_device: /. 
The valid format is '/'", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + "/", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + with self.assertRaisesRegex( + ValueError, + r"Could not parse remote_device: /cuda:0. The valid format is '/'", + ): + [ + m.forward() + for m in self._create_remote_module_iter( + "/cuda:0", + modes=[ModuleCreationMode.MODULE_CTOR], + ) + ] + + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_input_moved_to_cuda_device(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + # These two CPU tensors (in args and kwargs) should be implicitly moved to an appropriate cuda device. + t1 = torch.ones(1) + args = (t1, 2) + t2 = t1 * 2 + kwargs = dict(word=t2) + + # Only test Python nn.Module, because script module methods don't support taking kwargs. + for remote_module in self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR] + ): + ret_fut = remote_module.forward_async(*args, **kwargs) + ret = ret_fut.wait() + self.assertEqual(ret, tuple(reversed(args + (t2,)))) + # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0". + self.assertEqual(ret[0].device.type, "cpu") + self.assertEqual(ret[2].device.type, "cpu") + + ret = remote_module.forward(*args, **kwargs) + self.assertEqual(ret, tuple(reversed(args + (t2,)))) + # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0". + self.assertEqual(ret[0].device.type, "cpu") + self.assertEqual(ret[2].device.type, "cpu") + + @skip_if_lt_x_gpu(1) + @dist_utils.dist_init + def test_input_moved_to_cuda_device_script(self): + if self.rank != 0: + return + dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size) + + scripted_remote_module = next( + self._create_remote_module_iter( + f"{dst_worker_name}/cuda:0", + modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE], + ) + ) + + @torch.jit.script + def run_forward(scripted_remote_module: MyModuleInterface): + ret = scripted_remote_module.forward(torch.ones(1), 2, "3") + return ret + + ret = run_forward(scripted_remote_module) + + self.assertEqual(ret, ("3", 2, torch.ones(1))) + # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0". 
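+        # (Illustrative note, not part of the original test.) Until then, a caller
+        # that needs the outputs on the GPU has to move them back explicitly, e.g.:
+        #
+        #   ret_gpu = tuple(
+        #       x.to("cuda:0") if isinstance(x, torch.Tensor) else x for x in ret
+        #   )
+        #
+        # The assertion below documents the current CPU-only return behavior.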
+ self.assertEqual(ret[2].device.type, "cpu") diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ab782479fb190dbf39e1fa377abed03858aef640 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py @@ -0,0 +1,147 @@ +import torch +import torch.distributed as dist + +from torch import nn +from torch.nn.parallel import DistributedDataParallel +from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_distributed import ( + requires_gloo, + requires_nccl, + skip_if_lt_x_gpu, + skip_if_rocm, +) +from torch.distributed.pipeline.sync import Pipe + +class PipeWithDDPTest(RpcAgentTestFixture): + @property + def world_size(self) -> int: + return 2 + + @skip_if_lt_x_gpu(4) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_basic_nccl_ckpt_never(self): + self._run_basic_test("nccl", "never") + + @skip_if_lt_x_gpu(4) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_basic_nccl_ckpt_never_find_unused(self): + self._run_basic_test("nccl", "never", find_unused_parameters=True) + + @skip_if_lt_x_gpu(4) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_basic_nccl_ckpt_always(self): + self._run_basic_test("nccl", "always", static_graph=True) + + @skip_if_lt_x_gpu(4) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_basic_nccl_ckpt_except_last(self): + self._run_basic_test("nccl", "except_last", static_graph=True) + + @skip_if_lt_x_gpu(4) + @requires_gloo() + @dist_init + @skip_if_rocm + def test_basic_gloo_ckpt_never(self): + self._run_basic_test("gloo", "never") + + @skip_if_lt_x_gpu(4) + @requires_gloo() + @dist_init + @skip_if_rocm + def test_basic_gloo_ckpt_never_find_unused(self): + self._run_basic_test("gloo", "never", find_unused_parameters=True) + + @skip_if_lt_x_gpu(4) + @requires_gloo() + @dist_init + @skip_if_rocm + def test_basic_gloo_ckpt_always(self): + self._run_basic_test("gloo", "always", static_graph=True) + + @skip_if_lt_x_gpu(4) + @requires_gloo() + @dist_init + @skip_if_rocm + def test_basic_gloo_ckpt_except_last(self): + self._run_basic_test("gloo", "except_last", static_graph=True) + + def _run_basic_test(self, backend, checkpoint, find_unused_parameters=False, static_graph=False): + dist.init_process_group( + backend=backend, + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + # Use 4 GPUs, two replicas of a pipe across GPU 0 and 1 and another + # pipe between GPU 2 and 3. Both replicas are replicated via DDP. 
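+        # (Illustrative note.) With world_size == 2, the ``2 * self.rank`` /
+        # ``2 * self.rank + 1`` device indices used below work out to:
+        #
+        #   rank 0: fc1 on cuda:0, fc2/fc3 on cuda:1
+        #   rank 1: fc1 on cuda:2, fc2/fc3 on cuda:3
+        #
+        # i.e. each rank owns one pipeline replica spanning two GPUs, and DDP
+        # synchronizes gradients between the two replicas.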
+ fc1 = nn.Linear(16, 8, bias=False).cuda(2 * self.rank) + + class MyModule(nn.Module): + def __init__(self, device): + super().__init__() + self.fc2 = nn.Linear(8, 4, bias=False).cuda(device) + self.fc3 = nn.Linear(4, 2, bias=False).cuda(device) + + def forward(self, inp): + if find_unused_parameters: + return self.fc2(inp) + else: + return self.fc3(self.fc2(inp)) + + layer2 = MyModule(2 * self.rank + 1) + model = nn.Sequential( + fc1, + layer2 + ) + model = Pipe(model, chunks=2, checkpoint=checkpoint) + model = DistributedDataParallel( + model, + find_unused_parameters=find_unused_parameters, + static_graph=static_graph, + ) + + # Ensure inputs are different across ranks to verify that gradient + # sync indeed occurs. + model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1) + out = model(model_input).local_value() + out.sum().backward() + + # Run forward again for find_unused_parameters to trigger any potential errors. + if find_unused_parameters: + # Ensure inputs are different across ranks to verify that gradient + # sync indeed occurs. + unused_param_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1) + model(unused_param_input).local_value().sum().backward() + + # Run a few more iterations of fwd + bwd to ensure gradient synchronization + # occurs properly across iterations via delay_all_reduce/bucketized allreduce. + for _ in range(3): + model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1) + out = model(model_input).local_value() + out.sum().backward() + + # Check grads + output = [torch.empty_like(fc1.weight.grad), torch.empty_like(fc1.weight.grad)] + dist.all_gather(output, fc1.weight.grad) + self.assertEqual(output[0], output[1]) + + output = [torch.empty_like(layer2.fc2.weight.grad), torch.empty_like(layer2.fc2.weight.grad)] + dist.all_gather(output, layer2.fc2.weight.grad) + self.assertEqual(output[0], output[1]) + + if not find_unused_parameters: + output = [torch.empty_like(layer2.fc3.weight.grad), torch.empty_like(layer2.fc3.weight.grad)] + dist.all_gather(output, layer2.fc3.weight.grad) + self.assertEqual(output[0], output[1]) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipeline/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipeline/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipeline/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipeline/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fd458ce948f5a146b2f35a15a192caa1fef8b6d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipeline/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/__init__.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..d5301b7132b90cb9819299a83a806d14405f3ea8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1df7f4c718cbd0a3f9cac78294582d7c217c7602 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab106c84a93ca429c7fee731078357e0061a4a9c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4fbb706dc974aa67a433ebae34749ad250d3e88 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_rpc_agent_test_fixture.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_rpc_agent_test_fixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e5c7a6f8fbc88ceee29acd0a448372c90bf9031 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_rpc_agent_test_fixture.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_agent_test_fixture.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_agent_test_fixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d7a57162d97b28106a89348e179faec5c114852 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_agent_test_fixture.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_test.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d170c70c4c4586520f128c86ae09b17a87eeea5b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_test.cpython-310.pyc differ diff 
--git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d16a1aa9e316a3b7664349162b02067207a4e02 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py new file mode 100644 index 0000000000000000000000000000000000000000..44e9887f3ba5fad7d98d5244916a89bf59474678 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py @@ -0,0 +1,2781 @@ +import sys +import threading +import time +from enum import Enum +import random +import torch +import torch.nn as nn +from datetime import timedelta +import torch.distributed as dist +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +import torch.testing._internal.dist_utils +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.distributed.rpc import RRef +from torch.testing._internal.common_utils import IS_MACOS, skip_but_pass_in_sandcastle_if +from torch.testing._internal.dist_utils import ( + dist_init, + initialize_pg, + wait_until_node_failure, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_distributed import skip_if_lt_x_gpu + + +# Right now we test up to 3-layer nested rpc calls. +# rpc_done[1] and ctx_ids[1] represent rpc is done in prev rank, and context id +# sent from prev rank respectively. +# rpc_done[2] and ctx_ids[2] represents for prev of prev rank. +# rpc_done[3] and ctx_ids[3] represents for prev of prev of prev rank. +# rpc_done[0] and ctx_ids[0] represents for current rank, but mostly not used. +rpc_done = [False, False, False, False] +ctx_ids = [-1, -1, -1, -1] + +known_context_ids = set() + +requires_grad_tensor = torch.ones(3, 3, requires_grad=True) + +# Send rpc done info and context_id to +# dst_rank = (self.rank + rank_distance) % self.world_size +# we don't need a lock here since the GIL is held while executing remote +# python UDFs, so access is serialized across several workers. +def _set_rpc_done(ctx_id, rank_distance): + global rpc_done + global ctx_ids + global known_context_ids + rpc_done[rank_distance] = True + ctx_ids[rank_distance] = ctx_id + known_context_ids.add(ctx_id) + + +def _check_rpc_done(rank_distance): + while not rpc_done[rank_distance]: + time.sleep(0.1) + + +def _torch_ones(sizes, requires_grad=False): + return torch.ones(sizes, requires_grad=requires_grad) + +# This method must be called on the rref owner, and verifies that the grad of +# rref tensor equals to the given grad. 
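+# (Illustrative sketch, not part of the original helpers.) A typical caller runs
+# this check over RPC on the rref owner after a distributed backward pass:
+#
+#   ok = rpc.rpc_sync(
+#       rref_owner,
+#       _compare_owner_value,
+#       args=(context_id, rref_t1, t1.grad),
+#   )
+#   assert ok
+#
+# which is how ``_backward_rref`` in CommonDistAutogradTest verifies gradients
+# that live on the rref owner.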
+def _compare_owner_value(context_id, rref, grad): + grads = dist_autograd.get_gradients(context_id) + x = grads[rref.local_value()] + if x.is_sparse: + assert grad.is_sparse + x = x.to_dense() + grad = grad.to_dense() + else: + assert not grad.is_sparse + return torch.equal(x, grad) + + +def create_tensor(): + return torch.ones((3, 3), requires_grad=True) + + +def build_sparse_tensor(coalesce=False, requires_grad=True, dtype=torch.float32): + i = [[0, 1, 1], [2, 0, 2]] + v = [3.2, 4.1, 5.3] + tensor = torch.sparse_coo_tensor( + i, v, (3, 3), requires_grad=requires_grad, dtype=dtype + ) + if coalesce: + tensor = tensor.coalesce() + return tensor + + +@torch.jit.script +def create_torchscript_tensor() -> torch.Tensor: + return torch.ones((3, 3)).requires_grad_() + + +def my_py_add(t1, t2): + return torch.add(t1, t2) + + +def my_scalar_add(a, b): + return a + b + + +def my_rref_add(rref_t1, t2): + ret = torch.add(rref_t1.local_value(), t2) + return ret + + +@torch.jit.script +def my_script_add(t1, t2): + return torch.add(t1, t2) + + +@torch.jit.script +def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor: + t1 = ref_t1.to_here() + return torch.add(t1, t2) + + +def my_nested_rref_add(dst, rref_t1, t2): + return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2)) + + +def ret_requires_grad(): + return requires_grad_tensor + + +def my_py_nested_call(t1, t2, dst, world_size, hops): + next_dst = (dst + 1) % world_size + if hops > 0: + return rpc.rpc_sync( + worker_name(next_dst), + my_py_nested_call, + args=(t1, t2, next_dst, world_size, hops - 1), + ) + else: + return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2)) + + +# after dist autograd context is cleaned up, it should be cleaned up on other +# nodes. This helper allows timeout_seconds for those RPCs to be completed, and +# ensures that all the contexts have been cleaned up in that timeframe.any +def _all_contexts_cleaned_up(timeout_seconds=10): + global known_context_ids + start = time.time() + context_id_to_raised = set() + while ( + time.time() - start < timeout_seconds + and context_id_to_raised != known_context_ids + ): + for context_id in known_context_ids: + try: + dist_autograd._retrieve_context(context_id) + except RuntimeError: + context_id_to_raised.add(context_id) + # all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError. + success = context_id_to_raised == known_context_ids + return success + + +# This function creates a dis autograd context, run rpc_sync on the given ps, +# and then blocks until the ps has verified the grads are correctly accumulated. 
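+# (Illustrative sketch, not part of the original helpers; ``trainer_ranks`` and
+# ``ps_name`` are hypothetical placeholders.) A driver typically launches this
+# trainer asynchronously on several workers and then waits for all of them:
+#
+#   futs = [
+#       rpc.rpc_async(
+#           worker_name(dst),
+#           _run_trainer,
+#           args=(rref_t1, t2, ps_name, rank_diff, False),
+#       )
+#       for rank_diff, dst in enumerate(trainer_ranks, start=1)
+#   ]
+#   torch.futures.wait_all(futs)
+#
+# ``_test_trainer_ps`` in CommonDistAutogradTest follows this pattern.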
+def _run_trainer(rref_t1, t2, ps, rank_diff, sparse): + with dist_autograd.context() as context_id: + ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2)) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + dist_autograd.backward(context_id, [loss]) + # prevent deleting dist autograd context + rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff)) + rpc.rpc_sync(ps, _check_rpc_done, args=(0,)) + +# This function is the same as _run_trainer, except rpc calls torchscript +# function "my_script_ref_add" instead of python function "my_rref_add" +def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff, sparse): + with dist_autograd.context() as context_id: + ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2)) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + dist_autograd.backward(context_id, [loss]) + # prevent deleting dist autograd context + rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff)) + rpc.rpc_sync(ps, _check_rpc_done, args=(0,)) + + +class SimulateBackwardError(Function): + _simulate_error = True + + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + if SimulateBackwardError._simulate_error: + raise Exception("Simulate error on backward pass") + else: + return input + + +class ExecMode(Enum): + LOCAL = 1 # Run the operation locally. + RPC_SYNC = 2 # Run the operation using rpc_sync + REMOTE = 3 # Run the operation using remote. + RPC_ASYNC = 4 # Run the operation using rpc_async + + +# Common utils for both CPU and CUDA test suites +class CommonDistAutogradTest(RpcAgentTestFixture): + def _exec_func_with_dst(self, dst, exec_mode, method, *args): + if ExecMode.LOCAL == exec_mode: + if len(args) == 1 and isinstance(args[0], list): + return method(*args[0]) + return method(*args) + elif ExecMode.RPC_SYNC == exec_mode: + return rpc.rpc_sync(worker_name(dst), method, args=(args)) + elif ExecMode.REMOTE == exec_mode: + return rpc.remote(worker_name(dst), method, args=(args)).to_here() + elif ExecMode.RPC_ASYNC == exec_mode: + fut = rpc.rpc_async(worker_name(dst), method, args=(args)) + return fut.wait() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + def _exec_func(self, exec_mode, method, *args): + return self._exec_func_with_dst( + self._next_rank(), exec_mode, method, *args + ) + + def _next_rank(self): + if hasattr(self, "dst_rank"): + self.dst_rank = (self.dst_rank + 1) % self.world_size + if self.dst_rank == self.rank: + return self._next_rank() + else: + self.dst_rank = (self.rank + 1) % self.world_size + return self.dst_rank + + def _check_rpc_done(self, rank_distance): + _check_rpc_done(rank_distance) + + def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args): + if exec_mode == ExecMode.LOCAL: + torch.autograd.backward(tensors) + return [arg.grad for arg in args] + else: + self._verify_backwards_remote(tensors, context_id, local_grads, *args) + + def _verify_backwards_remote(self, tensors, context_id, local_grads, *args): + dist_autograd.backward(context_id, tensors) + + # Verify grads were accumulated appropriately. 
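+        # (Illustrative note.) ``dist_autograd.get_gradients(context_id)`` returns a
+        # dict mapping each leaf tensor that received a gradient in this context to
+        # that gradient, roughly:
+        #
+        #   grads = dist_autograd.get_gradients(context_id)  # Dict[Tensor, Tensor]
+        #   torch.equal(grads[t1], expected_t1_grad)          # hypothetical names
+        #
+        # so the loop below checks that exactly the args with a local gradient show
+        # up as keys, with matching values.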
+ grads = dist_autograd.get_gradients(context_id) + nargs = len(args) + ngrads = 0 + for i in range(0, nargs): + if local_grads[i] is not None: + self.assertIn(args[i], grads) + self.assertEqual(local_grads[i], grads[args[i]]) + ngrads += 1 + else: + self.assertNotIn(args[i], grads) + + self.assertEqual(ngrads, len(grads)) + + def _test_graph(self, fn, exec_mode, sparse): + dst_rank = (self.rank + 1) % self.world_size + + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor() + t2 = build_sparse_tensor() + else: + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2)) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), fn, args=(t1, t2) + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + # Verify graph for current context id. + ctx = dist_autograd._current_context() + self.assertEqual(context_id, ctx._context_id()) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + recv_functions = ctx._recv_functions() + self.assertEqual(1, len(recv_functions)) + self._verify_graph_for_first_rpc_call( + next(iter(send_functions.values())), + next(iter(recv_functions.values())), + t1, + t2, + ret, + ) + + # Wait for the prev rank to be done with rpc. + self._check_rpc_done(1) + # Verify graph for previous context id. + ctx = dist_autograd._retrieve_context(ctx_ids[1]) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values()))) + # this barrier is needed so one worker does not clean up their + # autograd context before another worker tries to access it. + dist.barrier() + + # autograd context should be cleaned up by now. + with self.assertRaises(RuntimeError): + ctx = dist_autograd._retrieve_context(context_id) + + # No autograd context available. + with self.assertRaises(RuntimeError): + ctx = dist_autograd._current_context() + + # 3-layer nested calls + def _test_graph_for_py_nested_call(self, exec_mode, sparse): + dst_rank = (self.rank + 1) % self.world_size + + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + else: + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + nest_dst_rank = (dst_rank + 1) % self.world_size + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_py_nested_call, + args=(t1, t2, dst_rank, self.world_size, 1), + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), + my_py_nested_call, + args=(t1, t2, dst_rank, self.world_size, 1), + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + # Barrier to ensure all RPCs are done. + dist.barrier() + + for rd in [1, 2, 3]: + rpc.rpc_sync( + worker_name((self.rank + rd) % self.world_size), + _set_rpc_done, + args=(context_id, rd), + ) + + # Barrier to ensure all set_rpc_done have completed. + dist.barrier() + + # For self.rank, it has 4 graphs to verify + # One is for current context id when this rank send first rpc call. 
+ # Second one is for prev context id when this rank make 1st nested + # call. + # Third one is for prev prev context id when this rank make + # 2nd nested call. + # Last one is for prev prev prev context id when this rank + # execute the torch.add() operator. + + # Verify first graph for current context id. + ctx = dist_autograd._current_context() + self.assertEqual(context_id, ctx._context_id()) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + recv_functions = ctx._recv_functions() + self.assertEqual(1, len(recv_functions)) + self._verify_graph_for_first_rpc_call( + next(iter(send_functions.values())), + next(iter(recv_functions.values())), + t1, + t2, + ret, + ) + + # Verify second graph for 1st nested call. + ctx = dist_autograd._retrieve_context(ctx_ids[1]) + self._verify_graph_for_nested_rpc_call(ctx) + + # Verify third graph for 2nd nested call. + ctx = dist_autograd._retrieve_context(ctx_ids[2]) + self._verify_graph_for_nested_rpc_call(ctx) + + # verify last graph for rpc call execution. + ctx = dist_autograd._retrieve_context(ctx_ids[3]) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values()))) + # this barrier is needed so one worker does not clean up their + # autograd context before another worker tries to access it. + dist.barrier() + + # Rank0->Rank1->Rank0 + def _test_graph_for_py_nested_call_itself(self, exec_mode, sparse): + dst_rank = (self.rank + 1) % self.world_size + + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + else: + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_py_nested_call, + args=( + t1, + t2, + (self.rank - 1 + self.world_size) % self.world_size, + self.world_size, + 0, + ), + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), + my_py_nested_call, + args=( + t1, + t2, + (self.rank - 1 + self.world_size) % self.world_size, + self.world_size, + 0, + ), + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + rpc.rpc_sync( + worker_name((self.rank + 1) % self.world_size), + _set_rpc_done, + args=(context_id, 1), + ) + + # For self.rank, it has 2 graphs to verify. + # One is for current context id when this rank send first rpc + # call and execute the torch.add() operator. + # Another one is for prev context id when this rank make + # nested call. + ctx = dist_autograd._current_context() + self.assertEqual(context_id, ctx._context_id()) + send_functions = ctx._send_functions() + self.assertEqual(2, len(send_functions)) + recv_functions = ctx._recv_functions() + self.assertEqual(2, len(recv_functions)) + self._verify_graph_for_first_rpc_call( + next(iter(send_functions.values())), + list(recv_functions.values())[1], + t1, + t2, + ret, + ) + self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1]) + + # Verify two pairs of send and recv functions for nested + # call + self._check_rpc_done(1) + ctx = dist_autograd._retrieve_context(ctx_ids[1]) + self._verify_graph_for_nested_rpc_call(ctx) + # this barrier is needed so one worker does not clean up their + # autograd context before another worker tries to access it. 
+ dist.barrier() + + def _test_no_graph_with_tensors_not_require_grad(self, exec_mode, sparse): + initialize_pg(self.file_init_method, self.rank, self.world_size) + dst_rank = (self.rank + 1) % self.world_size + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor(requires_grad=False) + t2 = build_sparse_tensor(requires_grad=False) + else: + t1 = torch.ones(3, 3, requires_grad=False) + t2 = torch.zeros(3, 3, requires_grad=False) + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(t1, t2) + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), torch.add, args=(t1, t2) + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + ctx = dist_autograd._current_context() + send_functions = ctx._send_functions() + self.assertEqual(len(send_functions), 0) + recv_functions = ctx._recv_functions() + self.assertEqual(len(recv_functions), 0) + + # Wait for the prev rank to be done with rpc. + self._check_rpc_done(1) + # NB: RRef.to_here() always passes the autograd context to the + # the callee, as the caller does not know whether the return + # value would contain a requires_grad tensor or not. + # + # rpc/remote with udf (_set_rpc_done here) also always passes the + # autograd context to the callee due to the same reason. + self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1])) + dist.barrier() + + def _test_rpc_complex_args(self, exec_mode, sparse): + with dist_autograd.context() as context_id: + num_tensors = 10 + tensors = [] + for i in range(num_tensors): + if sparse: + tensor = build_sparse_tensor(requires_grad=(i % 2 == 0)) + else: + tensor = torch.ones(3, 3, requires_grad=(i % 2 == 0)) + tensors.append(tensor) + dst_rank = self._next_rank() + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.stack, args=(tensors,) + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), torch.stack, args=(tensors,) + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + self.assertEqual(torch.stack(tensors), ret) + + # Verify appropriate tensors have been attached the autograd graph. + next_funcs = next(iter(dist_autograd._current_context()._send_functions().values())).next_functions + idx = 0 + for i in range(len(next_funcs)): + self.assertEqual( + "torch::autograd::AccumulateGrad", next_funcs[i][0].name() + ) + self.assertEqual(tensors[i], next_funcs[i][0].variable) + + # Verify that the worker id has been recorded in the context + ctx = dist_autograd._current_context() + worker_ids = ctx._known_worker_ids() + self.assertEqual(len(worker_ids), 1) + self.assertEqual(worker_ids, {dst_rank}) + + def context_cleanup_test_helper(self, rpc_args, func, nested=False): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # test that in dist autograd, in the case that tensors communicated over RPC do + # NOT require grad, we still cleanup the dist autograd contexts created + # on other nodes. This is because the autograd context is still + # communicated over RPC even if tensor arguments do not require grad, as + # it is possible that the response could. 
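+        # (Illustrative note.) "Cleaned up" is observable because retrieving a
+        # released context raises, which is what ``_all_contexts_cleaned_up`` polls:
+        #
+        #   try:
+        #       dist_autograd._retrieve_context(context_id)
+        #   except RuntimeError:
+        #       pass  # the context is gone on this worker
+        #
+        # The helper below issues the RPCs and then asserts that this holds on every
+        # peer once the local context exits.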
+ if nested: + dst_rank = (self.rank + 1) % self.world_size + nested_dst_rank = (dst_rank + 1) % self.world_size + dst_ranks = {dst_rank} + else: + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + + with dist_autograd.context() as context_id: + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + if nested: + rpc.rpc_sync( + worker_name(nested_dst_rank), + _set_rpc_done, + args=(context_id, 2), + ) + # the thread's context id should be cleaned up + with self.assertRaises(RuntimeError): + dist_autograd._retrieve_context(context_id) + # Ensure all peers have finished mutating the + # `known_context_ids` set. + dist.barrier() + # check that all contexts have been cleaned up. + success = _all_contexts_cleaned_up() + self.assertTrue(success) + + def _backward_no_grad_on_tensor(self, t1, t2, sparse): + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + torch.add, + args=(t1, t2)) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + self.assertIsNone(t1.grad) + self.assertIsNone(t2.grad) + + # Now populate .grad with local autograd engine and + # verify dist autograd doesn't mess with it. + loss_local = torch.add(t1, t2) + if sparse: + loss_local = torch.sparse.sum(loss_local) + else: + loss_local = loss_local.sum() + loss_local.backward() + self.assertIsNotNone(t1.grad) + self.assertIsNotNone(t2.grad) + + t1_grad_before = t1.grad + t2_grad_before = t2.grad + dist_autograd.backward(context_id, [loss]) + self.assertEqual(t1_grad_before, t1.grad) + self.assertEqual(t2_grad_before, t2.grad) + + # The current rank first creates a tensor on the rref_owner, and then passes + # the rref with another tensor to the callee to run either my_rref_add or + # my_nested_rref_add, depending on whether the callee is the rref owner. + # The grad of tensor lives on the current rank, and the grad of the rref + # tensor lives on the rref owner. + def _backward_rref(self, callee, rref_owner, t1, t2, local_grads, sparse): + local_ret = torch.add(t1, t2) + if sparse: + local_ret = torch.sparse.sum(local_ret) + else: + local_ret = local_ret.sum() + local_ret.backward() + with dist_autograd.context() as context_id: + if sparse: + rref_t1 = rpc.remote( + rref_owner, build_sparse_tensor, args=(False, True,) + ) + else: + rref_t1 = rpc.remote( + rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True} + ) + if callee == rref_owner: + rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2)) + else: + rref = rpc.remote( + callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2) + ) + ret = rref.to_here() + if sparse: + ret = torch.sparse.sum(ret) + else: + ret = ret.sum() + dist_autograd.backward(context_id, [ret]) + + # verify grads on caller + grads = dist_autograd.get_gradients(context_id) + self.assertIn(t2, grads) + self.assertEqual(grads[t2], t2.grad) + + # verify grads on rref owner + self.assertTrue( + rpc.rpc_sync( + rref_owner, + _compare_owner_value, + args=(context_id, rref_t1, t1.grad), + ) + ) + + # In this test, every rank will serve as a parameter server (ps) and a + # driver, and then kicks off trainers on the other three ranks. 
So, we have: + # ps = rank0 with trainers = rank1/2/3 + # ps = rank2 with trainers = rank2/3/0 + # ps = rank3 with trainers = rank3/0/1 + # ps = rank4 with trainers = rank0/1/2 + # + # These four test ps-trainer groups run on completely separate autograd + # graphs, but they share the same set of underlying RpcAgents. + def _test_trainer_ps(self, create_ref_fn, trainer_fn, sparse): + if sparse: + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + else: + t1 = torch.ones((3, 3), requires_grad=True) + t2 = torch.zeros((3, 3), requires_grad=True) + + local_ret = torch.add(t1, t2) + if sparse: + torch.sparse.sum(local_ret).backward() + else: + local_ret.sum().backward() + + # create rref on self + rref_t1 = rpc.remote( + worker_name(self.rank), + create_ref_fn, + args=()) + + # kick off forward and backward pass on three other workers (trainers) + rank_diffs = [1, 2, 3] + futures = [] + for rank_diff in rank_diffs: + futures.append( + rpc.rpc_async( + worker_name((self.rank + rank_diff) % self.world_size), + trainer_fn, + args=(rref_t1, t2, worker_name(self.rank), rank_diff, sparse), + ) + ) + + # check if the trainers have done with their backward pass + for rank_diff in rank_diffs: + self._check_rpc_done(rank_diff) + + # trainers are done and holding the context for verification + accumulate_grad_func = None + for rank_diff in rank_diffs: + # make sure grads are accumulated for the same tensors and values + # are all correct + ctx_id = ctx_ids[rank_diff] + grads = dist_autograd.get_gradients(ctx_id) + local_t1 = rref_t1.to_here() + self.assertIn(local_t1, grads) + self.assertEqual(grads[local_t1], t1.grad) + + # unblock trainers + _set_rpc_done(None, 0) + + # wait until all trainers are done + torch.futures.wait_all(futures) + + def _backward_multiple_round_trips(self, t1, t2, t3, t4, t5, local_grads, sparse): + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + # Multiple RPCs between different nodes. + val = self._exec_func(exec_mode, torch.add, t1, t2) + val = self._exec_func(exec_mode, torch.mul, t3, val) + s1 = self._exec_func(exec_mode, torch.stack, (t4, val)) + s2 = self._exec_func(exec_mode, torch.stack, (t5, val)) + if sparse: + val = self._exec_func(exec_mode, torch.mul, s1, s2) + val = self._exec_func(exec_mode, torch.mul, val, val) + loss = torch.sparse.sum(val) + else: + val = self._exec_func(exec_mode, torch.bmm, s1, s2) + val = self._exec_func(exec_mode, torch.matmul, val, val) + loss = val.sum() + + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5 + ) + local_grads = ret if ret else local_grads + + def _backward_different_dtypes(self, t1, t2, sparse): + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + loss = self._exec_func(exec_mode, torch.add, t1, t2) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + # Run the same code locally and with dist autograd and verify gradients + # are same. 
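+    # (Illustrative sketch; ``dst`` is a placeholder rank.) The local/remote
+    # comparison used by these helpers boils down to:
+    #
+    #   # local reference
+    #   loss = my_py_add(t1, t2).sum()
+    #   torch.autograd.backward([loss])
+    #   expected = (t1.grad, t2.grad)
+    #
+    #   # distributed run
+    #   with dist_autograd.context() as context_id:
+    #       loss = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t1, t2)).sum()
+    #       dist_autograd.backward(context_id, [loss])
+    #       grads = dist_autograd.get_gradients(context_id)
+    #       assert torch.equal(grads[t1], expected[0])
+    #
+    # ``_exec_func`` and ``_verify_backwards`` encapsulate exactly this comparison.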
+ def _backward_simple_python_udf(self, t1, t2, sparse): + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func(exec_mode, my_py_add, t1, t2) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + # Run the same code locally and with dist autograd and verify gradients + # are same. + def _backward_simple_script_call(self, t1, t2, sparse): + local_grads = None + for exec_mode in [ + ExecMode.LOCAL, + ExecMode.RPC_SYNC, + ExecMode.RPC_ASYNC, + ExecMode.REMOTE, + ]: + with dist_autograd.context() as context_id: + forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2) + if sparse: + loss = torch.sparse.sum(forward_ret) + else: + loss = forward_ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + def _nested_backward_accumulate_grads(self, t1, t2, sparse): + with dist_autograd.context() as context_id: + ret = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._test_nested_backward_accumulate_grads, + args=(t1, t2, self._next_rank()), + ) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + # Run backward twice. + dist_autograd.backward(context_id, [loss], retain_graph=True) + dist_autograd.backward(context_id, [loss]) + + def _backwards_nested_python_udf(self, t1, t2, sparse): + t3 = t1 * t2 + t4 = t1 + t2 + res = t3 + t4 + loss = t1 * t2 * t3 * t4 * res + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + torch.autograd.backward([loss]) + + # Now run distributed autograd. + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._nested_python_udf, + args=(t1, t2, self._next_rank()), + ) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + dist_autograd.backward(context_id, [loss]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + def _mixed_requires_grad(self, t1, t2, sparse): + for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func( + exec_mode, DistAutogradTest._mixed_requires_grad_operaton, t1, t2 + ) + self.assertEqual(t1 * t2, ret) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + dist_autograd.backward(context_id, [loss]) + self.assertTrue(t1.requires_grad) + self.assertFalse(t2.requires_grad) + grads = dist_autograd.get_gradients(context_id) + self.assertIn(t1, grads) + self.assertNotIn(t2, grads) + self.assertEqual(t2, grads[t1]) + + def _multiple_backward(self, t1, t2, sparse): + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + torch.add, + args=(t1, t2)) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + # Run backward in a loop multiple times. + for i in range(1000): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + # For current context, this rank sends t1 and t2 tensors to dst_rank, + # then get t3 = torch.add(t1, t2) result tensor. 
+ # For the current context in this rank, it expects graph like this: + # send function: + # rpcSendBackward + # / \ + # t1.AccumulateGrad t2.AccumulateGrad + # + # recv function: + # + # | + # t3.rpcRecvBackward + # + def _verify_graph_for_first_rpc_call( + self, send_function, recv_function, t1, t2, ret + ): + # Retrieve the next functions in the graph. + next_funcs = send_function.next_functions + self.assertEqual(2, len(next_funcs)) + + # We should now hit t1 and t2 in the autograd graph. + self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name()) + self.assertEqual(t1, next_funcs[0][0].variable) + self.assertEqual(0, next_funcs[0][1]) + self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name()) + self.assertEqual(t2, next_funcs[1][0].variable) + self.assertEqual(0, next_funcs[1][1]) + + # Test recv functions. + self.assertEqual(ret.grad_fn, recv_function) + + # Run the same code locally and with dist autograd and verify gradients + # are same. + def _backward_simple(self, dst, t1, t2, local_grads, sparse): + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func_with_dst( + dst, exec_mode, torch.add, t1, t2 + ) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + # For a context passed from previous nested chain calls, this rank + # receives two tensors t1 and t2, executes torch.add(t1, t2) and sends + # result tensor t3 back. + # For this context in this rank, it expects graph like this: + # send and recv functions: + # rpcSendBackward + # | + # t3.AddBackward0 + # / \ + # t1.recvRpcBackward t2.recvRpcBackward + def _verify_graph_for_rpc_call_exec(self, send_function): + # Verify next function is AddBackward0 + next_funcs = send_function.next_functions + self.assertEqual(1, len(next_funcs)) + add_backward_fn = next_funcs[0][0] + self.assertEqual("AddBackward0", add_backward_fn.name()) + + # Verify the next two functions are the same recv backward function. + next_funcs = add_backward_fn.next_functions + self.assertEqual(2, len(next_funcs)) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name() + ) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name() + ) + self.assertEqual(next_funcs[0][0], next_funcs[1][0]) + + # For a context passed from previous nested chain calls, this rank + # receives two tensors t1 and t2, forwards t1 and t2 tensors using + # nested rpc call to next dst. In return route, receive result tensor t3 + # from next dst and forwarding t3 back to previous calls. 
+ # For this context in this rank, it expects graph like this: + # send and recv functions for receiving and forwarding t1 and t2: + # rpcSendBackward + # / \ + # t1.recvRpcBackward t2.recvRpcBackward + # send and recv functions for receiving and forwarding t3: + # rpcSendBackward + # | + # t3.recvRpcBackward + def _verify_graph_for_nested_rpc_call(self, ctx): + send_functions = ctx._send_functions() + self.assertEqual(2, len(send_functions)) + + # For send function when making nest rpc call, + # next functions of the send function are two recv functions + # for received two tensors from previous call + next_funcs = next(iter(send_functions.values())).next_functions + self.assertEqual(2, len(next_funcs)) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name() + ) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name() + ) + self.assertEqual(next_funcs[0][0], next_funcs[1][0]) + + # For send function when returning response to previous call + # next function of the send function is the recv function + # for received tensor result returned from nested call + next_funcs = list(send_functions.values())[1].next_functions + self.assertEqual(1, len(next_funcs)) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name() + ) + + +class TensorPipeAgentDistAutogradTest(CommonDistAutogradTest): + + # Sparse tests only work with TensorPipeAgent. + @dist_init + def test_graph_for_builtin_call_sparse(self): + self._test_graph(torch.add, ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_python_call_sparse(self): + self._test_graph(my_py_add, ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_builtin_remote_call_sparse(self): + self._test_graph(torch.add, ExecMode.REMOTE, True) + + @dist_init + def test_graph_for_python_remote_call_sparse(self): + self._test_graph(my_py_add, ExecMode.REMOTE, True) + + @dist_init + def test_graph_for_py_nested_call_sparse(self): + self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_py_nested_remote_call_sparse(self): + self._test_graph_for_py_nested_call(ExecMode.REMOTE, True) + + @dist_init + def test_graph_for_py_nested_call_itself_sparse(self): + self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_py_nested_remote_call_itself_sparse(self): + self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, True) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_sparse(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, True) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_remote_sparse(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, True) + + @dist_init + def test_rpc_complex_args_sparse(self): + self._test_rpc_complex_args(ExecMode.RPC_SYNC, True) + + @dist_init + def test_remote_complex_args_sparse(self): + self._test_rpc_complex_args(ExecMode.REMOTE, True) + + @dist_init + def test_context_cleanup_tensor_with_grad_sparse(self): + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def test_context_cleanup_tensor_no_grad_sparse(self): + t1 = build_sparse_tensor(requires_grad=False) + self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add) + + @dist_init + def test_context_cleanup_nested_rpc_sparse(self): + t1 = 
build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + dst_rank = (self.rank + 1) % self.world_size + args = (t1, t2, dst_rank, self.world_size, 0) + self.context_cleanup_test_helper( + rpc_args=args, func=my_py_nested_call, nested=True + ) + + @dist_init + def test_backward_no_grad_on_tensor_sparse(self): + self._backward_no_grad_on_tensor( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_backward_simple_sparse(self): + self._backward_simple( + self._next_rank(), + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_simple_self_sparse(self): + self._backward_simple( + self.rank, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_rref_multi_sparse(self): + if self.rank > 0: + callee = "worker0" + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_rref_sparse(self): + callee = worker_name(self._next_rank()) + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_rref_nested_sparse(self): + callee = worker_name((self.rank + 1) % self.world_size) + rref_owner = worker_name((self.rank + 2) % self.world_size) + self._backward_rref( + callee, + rref_owner, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_trainer_ps_sparse(self): + self._test_trainer_ps( + build_sparse_tensor, + _run_trainer, + True + ) + + @dist_init + def test_backward_multiple_round_trips_sparse(self): + self._backward_multiple_round_trips( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=False), + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=False), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_different_dtypes_sparse(self): + self._backward_different_dtypes( + build_sparse_tensor(requires_grad=True, dtype=torch.float32), + build_sparse_tensor(requires_grad=True, dtype=torch.float64), + True + ) + + @dist_init + def test_backward_simple_python_udf_sparse(self): + self._backward_simple_python_udf( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_backward_simple_script_call_sparse(self): + self._backward_simple_script_call( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_nested_backward_accumulate_grads_sparse(self): + self._nested_backward_accumulate_grads( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_backwards_nested_python_udf_sparse(self): + # Run equivalent of _nested_python_udf locally. 
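+        # The helper first runs the t1/t2 UDF chain with plain torch.autograd,
+        # then repeats it under dist_autograd via an RPC to the next rank and
+        # compares the gradients from both runs.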
+ self._backwards_nested_python_udf( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_mixed_requires_grad_sparse(self): + self._mixed_requires_grad( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=False), + True + ) + + @dist_init + def test_multiple_backward_sparse(self): + self._multiple_backward( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_embedding_bag_with_no_grad_tensors(self): + dst = self._next_rank() + remote_embedding = rpc.remote( + worker_name(dst), + torch.nn.EmbeddingBag, + args=(16, 16), + kwargs={"mode": "sum", "sparse": True}, + ) + local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True) + + input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9]) + # requires_grad = True to record send/recv functions + per_sample_weights = torch.rand((8), requires_grad=True) + offsets = torch.LongTensor([0, 4]) + + local_res = local_embedding(input, offsets, per_sample_weights) + + # Run backward twice. + torch.autograd.backward([local_res.sum()], retain_graph=True) + torch.autograd.backward([local_res.sum()]) + local_grad = local_embedding.weight.grad + + with dist_autograd.context() as context_id: + res = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._call_remote_embedding, + args=(remote_embedding, input, offsets, per_sample_weights), + ) + + # Run backward twice to test accumulation of sparse gradients. + dist_autograd.backward(context_id, [res.sum()], retain_graph=True) + dist_autograd.backward(context_id, [res.sum()]) + + remote_grad = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._get_grad, + args=(remote_embedding, context_id), + ) + + self.assertEqual(local_grad, remote_grad) + + +class DistAutogradTest(CommonDistAutogradTest): + @dist_init + def test_autograd_context(self): + # Verify max possible id. + max_auto_increment = 281474976710655 + self.assertEqual( + max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id() + ) + + context_ids = [] + for i in range(200): + with dist_autograd.context() as context_id: + self.assertEqual( + context_id, + dist_autograd._retrieve_context(context_id)._context_id(), + ) + # First 16 bits should be worker_id. + self.assertEqual(self.worker_id, context_id >> 48) + context_ids.append(context_id) + + for context_id in context_ids: + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + dist_autograd._retrieve_context(context_id) + + @dist_init + def test_nested_context(self): + with dist_autograd.context() as context_id: + # Nested contexts not supported. 
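+            # dist_autograd allows at most one active context per thread, so
+            # entering a second one here should raise immediately.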
+ with self.assertRaisesRegex( + RuntimeError, "Already have an autograd context id for this thread" + ): + with dist_autograd.context() as context_id: + pass + + @dist_init + def test_graph_for_builtin_call(self): + self._test_graph(torch.add, ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_python_call(self): + self._test_graph(my_py_add, ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_builtin_remote_call(self): + self._test_graph(torch.add, ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_python_remote_call(self): + self._test_graph(my_py_add, ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_py_nested_call(self): + self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_py_nested_remote_call(self): + self._test_graph_for_py_nested_call(ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_py_nested_call_itself(self): + self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_py_nested_remote_call_itself(self): + self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, False) + + @dist_init + def test_no_graph_with_tensors_not_require_grad(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, False) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_remote(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, False) + + def _test_grad_only_on_return_value(self, exec_mode): + initialize_pg(self.file_init_method, self.rank, self.world_size) + dst_rank = (self.rank + 1) % self.world_size + with dist_autograd.context() as context_id: + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), ret_requires_grad + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + dist_autograd.backward(context_id, [ret.sum()]) + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + # Wait for the prev rank to be done with rpc. 
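+            # ctx_ids[1] holds the context id that the previous rank propagated to
+            # us via _set_rpc_done; the gradient for requires_grad_tensor (returned
+            # by ret_requires_grad executed on this worker) was accumulated locally
+            # under that context.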
+ self._check_rpc_done(1) + grads = dist_autograd.get_gradients(ctx_ids[1]) + self.assertEqual(1, len(grads)) + self.assertIn(requires_grad_tensor, grads) + self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor]) + # due to the above get_gradients call, ensure that dist autograd + # contexts aren't cleaned up until all workers exit context managers + dist.barrier() + + @dist_init + def test_grad_only_on_return_value(self): + self._test_grad_only_on_return_value(ExecMode.RPC_SYNC) + + @dist_init + def test_grad_only_on_return_value_remote(self): + self._test_grad_only_on_return_value(ExecMode.REMOTE) + + @dist_init + def test_rpc_complex_args(self): + self._test_rpc_complex_args(ExecMode.RPC_SYNC, False) + + @dist_init + def test_remote_complex_args(self): + self._test_rpc_complex_args(ExecMode.REMOTE, False) + + @dist_init + def test_context_cleanup_tensor_with_grad(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def test_context_cleanup_tensor_no_grad(self): + t1 = torch.ones(3, 3, requires_grad=False) + self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add) + + @dist_init + def test_context_cleanup_no_tensors(self): + self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add) + + @dist_init + def test_context_cleanup_nested_rpc(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + dst_rank = (self.rank + 1) % self.world_size + args = (t1, t2, dst_rank, self.world_size, 0) + self.context_cleanup_test_helper( + rpc_args=args, func=my_py_nested_call, nested=True + ) + + @dist_init + def test_worker_ids_recorded(self): + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + with dist_autograd.context() as context_id: + # if no tensors require grad, we should still record worker_ids, as + # the autograd context ID is still passed to other workers. + t1 = torch.ones(3, 3, requires_grad=False) + t2 = torch.zeros(3, 3, requires_grad=False) + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2)) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # all worker_ids in dst_ranks should be recorded. + ctx = dist_autograd._current_context() + worker_ids = ctx._known_worker_ids() + self.assertEqual(worker_ids, dst_ranks) + + # worker_ids should be recorded when tensors do require grad + t1.requires_grad = True + t2.requires_grad = True + for dst_rank in dst_ranks: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(t1, t2) + ) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # all worker_ids in dst_ranks should be recorded. 
+ worker_ids = ctx._known_worker_ids() + self.assertEqual(worker_ids, dst_ranks) + + @dist_init + def test_dist_autograd_profiling(self): + with dist_autograd.context() as context_id: + t1 = torch.rand(3, 3, requires_grad=True) + t2 = torch.rand(3, 3, requires_grad=True) + loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum() + with torch.autograd.profiler.profile() as p: + dist_autograd.backward(context_id, [loss]) + + function_events = p.function_events + + def get_event(partial_key): + return next(event for event in function_events if partial_key in event.name) + + send_event = get_event("SendRpcBackward") + recv_event = get_event("RecvRpcBackward") + backward_event = get_event("torch::distributed::autograd::backward") + # There should be at least 1 send and recv_events each, corresponding to send/recv functions executed. + self.assertEqual(send_event.count, 1) + self.assertEqual(recv_event.count, 1) + # The CPU total for backward event should be great than send and recv, since + # applying those functions in the backwards pass is a subset of the entire backward pass. + self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total) + self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total) + + @dist_init + def test_error_in_context(self): + with dist_autograd.context() as context_id: + t1 = torch.rand(3, 3, requires_grad=True) + t2 = torch.rand(6, 6, requires_grad=True) + + with self.assertRaises(RuntimeError): + # This should throw an error since matrix sizes don't match. + rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(t1, t2) + ) + + @dist_init + def test_backward_no_grad_on_tensor(self): + self._backward_no_grad_on_tensor( + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + False + ) + + @dist_init + def test_backward_simple(self): + self._backward_simple( + self._next_rank(), + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_simple_self(self): + self._backward_simple( + self.rank, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_rref(self): + callee = worker_name(self._next_rank()) + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_rref_multi(self): + if self.rank > 0: + callee = "worker0" + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_rref_nested(self): + callee = worker_name((self.rank + 1) % self.world_size) + rref_owner = worker_name((self.rank + 2) % self.world_size) + self._backward_rref( + callee, + rref_owner, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_trainer_ps(self): + self._test_trainer_ps( + create_tensor, + _run_trainer, + False + ) + + @dist_init + def test_trainer_ps_torchscript_functions(self): + # TODO, need more investigation + # there is rref leak when shutting down, suspect it is because + # ref as arg is passed to pybind boundary, and the ref is not garbage + # collected by python when calling shutdown() + import torch.distributed.rpc.api as api + api._ignore_rref_leak = 
True + + self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript, False) + + @dist_init + def test_backward_multiple_round_trips(self): + self._backward_multiple_round_trips( + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3)), + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3)), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_different_tensor_dims(self): + local_grads = None + t1 = torch.rand((4, 6), requires_grad=True) + t2 = torch.rand((6, 5)) + t3 = torch.rand((5, 7), requires_grad=True) + t4 = torch.rand((7, 9)) + + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + val = self._exec_func(exec_mode, torch.matmul, t1, t2) + val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4)) + loss = val.sum() + + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_backward_unused_tensors(self): + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + t3 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3)) + val = self._exec_func( + exec_mode, + torch.matmul, + torch.narrow(s, 0, 0, 1), + torch.narrow(s, 0, 2, 1), + ) + + loss = val.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t3 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_backward_multiple_output_tensors(self): + local_grads = None + t = torch.rand((10, 2), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + tensor_list = self._exec_func(exec_mode, torch.split, t, 2) + t1 = tensor_list[0] + t2 = tensor_list[2] + t3 = tensor_list[4] + + val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3)) + + loss = val.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t + ) + local_grads = ret if ret else local_grads + + def _run_test_backward_unused_send_function_in_thread(self): + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + # We don't use the result of an RPC function, as a result the + # backward pass would hang in the "FAST" mode. + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + + val = torch.mul(t1, t2) + + # Run backward, this would hang forever. + dist_autograd.backward(context_id, [val.sum()]) + + @dist_init + def test_backward_unused_send_function(self): + # Run the test in a thread which would never finish. + t = threading.Thread( + target=self._run_test_backward_unused_send_function_in_thread + ) + t.daemon = True + t.start() + t.join(10) # Wait for 10s. + + # Verify thread is still alive (indicating backward hasn't completed yet). + self.assertTrue(t.is_alive()) + + @dist_init + def test_backward_autograd_engine_error(self): + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + # Perform some ops before error simulation. 
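+            # SimulateBackwardError raises only in its backward(), so the failure
+            # surfaces when the distributed backward pass reaches this node.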
+ tmp = (t1 + t2) * (t1 + t2) + t3 = SimulateBackwardError.apply(tmp) + + # Run multiple round trips across different nodes and verify the + # original node receives an error thrown on a node deep in the chain. + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t2, t3) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.mul, args=(val, t2) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(val, t2) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.div, args=(val, t2) + ) + + with self.assertRaisesRegex( + RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass" + ): + # Run backwards, and validate we receive an error. + dist_autograd.backward(context_id, [val.sum()]) + + @dist_init(clean_shutdown=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_backward_node_failure(self): + rpc._set_rpc_timeout(5) # 5 seconds + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + + # Wait for all RPCs to be done. + dist.barrier() + + # Kill all odd rank nodes. + if self.rank % 2 == 0: + shutdown_error_regex = self.get_shutdown_error_regex() + # Wait for all other nodes to die. + for rank in range(self.world_size): + if rank % 2 != 0: + wait_until_node_failure(rank, shutdown_error_regex) + + # Shutdown sequence is not very well defined and as a result + # we might see any error given by get_shutdown_error_regex() + with self.assertRaisesRegex(RuntimeError, shutdown_error_regex): + # Run backwards, and validate we receive an error since all + # other nodes are dead. + dist_autograd.backward(context_id, [res.sum()]) + else: + # Exit all other nodes. 
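+            # Odd ranks simply return here; with clean_shutdown=False their exit is
+            # what the surviving even ranks above observe as a node failure.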
+ pass + + @dist_init + def test_backward_without_context(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + context_id = 100 # dummy context_id + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + dist_autograd.backward(context_id, [res.sum()]) + + @dist_init + def test_backward_without_rpc(self): + dst_rank = self.rank + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + t3 = torch.add(t1, t2) + + dist_autograd.backward(context_id, [t3.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + self.assertEqual(torch.ones(3, 3), grads[t1]) + self.assertEqual(torch.ones(3, 3), grads[t2]) + + @dist_init + def test_backward_invalid_args(self): + with dist_autograd.context() as context_id: + + with self.assertRaisesRegex(TypeError, "incompatible function arguments"): + dist_autograd.backward(context_id, None) + + with self.assertRaisesRegex(TypeError, "incompatible function arguments"): + dist_autograd.backward(None, None) + + with self.assertRaisesRegex( + RuntimeError, "No tensors provided for gradient computation" + ): + dist_autograd.backward(context_id, []) + + with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"): + t = torch.rand(3, 3) + dist_autograd.backward(context_id, [t]) + + with self.assertRaisesRegex( + RuntimeError, "is not a scalar, all roots need to be scalar" + ): + t = torch.rand(3, 3, requires_grad=True) + dist_autograd.backward(context_id, [t]) + + with self.assertRaisesRegex( + RuntimeError, "does not have a valid gradient function" + ): + t = torch.rand(1, requires_grad=True) + dist_autograd.backward(context_id, [t]) + + @dist_init + def test_backward_multiple_roots(self): + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum() + r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum() + r3 = self._exec_func(exec_mode, torch.cos, t1).sum() + r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum() + + local_grads = self._verify_backwards( + exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2 + ) + + @dist_init + def test_backward_different_dtypes(self): + self._backward_different_dtypes( + torch.rand((3, 3), requires_grad=True, dtype=torch.float32), + torch.rand((3, 3), requires_grad=True, dtype=torch.float64), + False + ) + + @dist_init + def test_backward_simple_python_udf(self): + self._backward_simple_python_udf( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init + def test_backward_simple_script_call(self): + self._backward_simple_script_call( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @staticmethod + def _complex_python_udf(t1, t2): + t3 = torch.nn.functional.linear(t1, t2) + t4 = torch.nn.functional.linear(t2, t3) + t5 = torch.nn.functional.linear(t3, t4) + return torch.linalg.multi_dot([t1, t2, t3, t4, t5]) + + @dist_init + def test_backward_complex_python_udf(self): + # Run the same code locally and with dist autograd and verify gradients + # are 
same. + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func( + exec_mode, DistAutogradTest._complex_python_udf, t1, t2 + ) + loss = ret.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + @staticmethod + def _python_udf_with_backward_error(t1, t2): + t3 = t1 + t2 + t4 = SimulateBackwardError.apply(t3) + return torch.linalg.multi_dot([t1, t2, t3, t4]) + + @staticmethod + def _nested_rpc_call_backward_error(t1, t2, dst): + t1 = t1 * t2 + t2 = t1 + t2 + res = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._python_udf_with_backward_error, + args=(t1, t2), + ) + return torch.linalg.multi_dot([t1, t2, res]) + + @dist_init + def test_backward_python_udf_error(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._nested_rpc_call_backward_error, + args=(t1, t2, self._next_rank()), + ) + with self.assertRaisesRegex( + RuntimeError, "Simulate error on backward pass" + ): + dist_autograd.backward(context_id, [loss.sum()]) + + _backward_done = False + + @dist_init(clean_shutdown=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_backward_node_failure_python_udf(self): + # Set a short timeout to quickly time out failed RPCs. + rpc._set_rpc_timeout(5) # 5 seconds + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + dst = self._next_rank() + res = rpc.rpc_sync( + worker_name(dst), + my_py_nested_call, + args=(t1, t2, dst, self.world_size, 1), + ) + + dist.barrier() + + # Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error. + if self.rank == 2: + return + + store = dist.distributed_c10d._get_default_store() + if self.rank == 0: + # Wait for rank 2 to die. + shutdown_error_regex = self.get_shutdown_error_regex() + wait_until_node_failure(2, shutdown_error_regex) + # Shutdown sequence is not very well defined and as a result + # we might see any error given by get_shutdown_error_regex(). + with self.assertRaisesRegex(RuntimeError, shutdown_error_regex): + # Run backwards, and validate we receive an error since rank 2 is dead. + dist_autograd.backward(context_id, [res.sum()]) + + # Mark rank 0 is done in the store, since the RPC framework on + # some nodes might be broken at this point. + store.set('test_backward_node_failure_python_udf_rank0_done', "True") + else: + # Wait for backward to finish on rank 0. + store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10)) + + @staticmethod + def _nested_python_udf(t1, t2, dst): + t3 = t1 * t2 + t4 = t1 + t2 + res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4)) + return t1 * t2 * t3 * t4 * res + + @dist_init + def test_backwards_nested_python_udf(self): + # Run equivalent of _nested_python_udf locally. 
+ self._backwards_nested_python_udf( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + _test_clean_context_backward_context_id = None + + class MyBackwardFunc(Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + assert DistAutogradTest._test_clean_context_backward_context_id is not None + + # Release the context to simulate error (use barrier before releasing + # context to ensure all nodes execute the backward function). + dist.barrier() + dist_autograd._release_context( + DistAutogradTest._test_clean_context_backward_context_id + ) + + # Verify all contexts are cleaned up. + assert _all_contexts_cleaned_up() + + return input + + @dist_init + def test_clean_context_during_backward(self): + """ + This test simulates the situation where the 'backward' call might throw + an exception locally which would lead to the autograd context being + cleaned up if we're using the context manager. As a result, the autograd + context might be cleaned up while some threads are still using the + autograd context. + + It is fine for the 'backward' call to throw an exception in this test, + but the process should not crash. + """ + initialize_pg(self.file_init_method, self.rank, self.world_size) + + context = dist_autograd._new_context() + context_id = context._context_id() + DistAutogradTest._test_clean_context_backward_context_id = context_id + + # Send the context id to all nodes. + for i in range(0, self.world_size): + if i != self.rank: + rank_distance = (i - self.rank + self.world_size) % self.world_size + rpc.rpc_sync( + worker_name(i), + _set_rpc_done, + args=(context_id, rank_distance), + ) + + dist.barrier() + + # Verify all context ids have been received. + self.assertEqual(self.world_size - 1, len(known_context_ids)) + + t1 = torch.rand((3, 3), requires_grad=True) + for i in range(0, 100): + dst = self._next_rank() + t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1)) + + # Call MyBackwardFunc as the first op of the backward pass to + # ensure we release the context early in the backward pass. + t1 = DistAutogradTest.MyBackwardFunc.apply(t1) + self.assertEqual(100, len(context._send_functions())) + + context_id = 100 # dummy context_id + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + dist_autograd.backward(context_id, [t1.sum()]) + + # HACK: Killing workers since otherwise the autograd engine gets stuck on + # other nodes. The proper fix would be addressing: + # https://github.com/pytorch/pytorch/issues/27643, which would inform + # other nodes about the failure. + # The autograd engine gets stuck on other nodes since they're waiting to + # receive gradients from the node that received an error (and as a + # result it didn't execute the rest of the graph). 
+ dist.barrier() + rpc.shutdown(graceful=False) + sys.exit(0) + + @classmethod + def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights): + embedding = embedding_rref.local_value() + return embedding(input, offsets, per_sample_weights) + + @classmethod + def _get_grad(cls, embedding_rref, context_id): + embedding = embedding_rref.local_value() + grad_map = dist_autograd.get_gradients(context_id) + return grad_map[embedding.weight] + + @classmethod + def _mixed_requires_grad_operaton(cls, t1, t2): + if t2.requires_grad: + return t1 - t2 + else: + return t1 * t2 + + @dist_init + def test_mixed_requires_grad(self): + self._mixed_requires_grad( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=False), + False + ) + + class TestDebugInfoFunc(Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + debug_info = dist_autograd._get_debug_info() + assert debug_info is not None + backward_passes = int(debug_info["num_current_backward_passes"]) + + # Hard to validate exact numbers because of the distributed nature. + # We can't use a barrier() here since that would block the single + # CPU thread available for autograd and can cause deadlocks. + assert backward_passes >= 1 and backward_passes <= 4 + return input + + @dist_init + def test_debug_info(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + i = 0 + res = {} + res[i] = t1 + for rank in range(self.world_size): + if rank != self.rank: + res[i + 1] = rpc.rpc_sync( + worker_name(rank), torch.add, args=(res[i], t2) + ) + i += 1 + + # Call custom function in middle of backward pass to ensure all + # nodes are still waiting on a backward(). + res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i]) + i += 1 + + for rank in range(self.world_size): + if rank != self.rank: + res[i + 1] = rpc.rpc_sync( + worker_name(rank), torch.add, args=(res[i], t2) + ) + i += 1 + + dist_autograd.backward(context_id, [res[i].sum()]) + + debug_info = dist_autograd._get_debug_info() + num_autograd_context = int(debug_info["num_autograd_contexts"]) + # Need atleast one context and not more than 4. + self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4) + + for rd in range(self.world_size - 1): + rpc.rpc_sync( + worker_name((self.rank + rd + 1) % self.world_size), + _set_rpc_done, + args=(context_id, rd + 1), + ) + + dist.barrier() + + # Validate information + debug_info = dist_autograd._get_debug_info() + assert debug_info is not None + self.assertEqual(0, int(debug_info["num_current_backward_passes"])) + # only have `num_current_backward_passes` and `num_autograd contexts` + self.assertTrue(len(debug_info) == 2) + + self.assertTrue(_all_contexts_cleaned_up()) + + # All contexts should be cleaned up. 
+ debug_info = dist_autograd._get_debug_info() + self.assertEqual(0, int(debug_info["num_autograd_contexts"])) + + @staticmethod + def _workload_thread(): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2)) + t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3)) + t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4)) + t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5)) + + dist_autograd.backward(context_id, [t6.sum()]) + + @dist_init + def test_async_dist_autograd(self): + """ + This test ensures async processing for distributed autograd works + appropriately. This is achieved by spawning multiple threads and + hammering a single node with a lot of backward() calls. + """ + + initialize_pg(self.file_init_method, self.rank, self.world_size) + if self.rank != 0: + # All other ranks schedule work on rank 0. + threads = [] + for i in range(20): + t = threading.Thread(target=DistAutogradTest._workload_thread) + t.start() + threads.append(t) + + for thread in threads: + thread.join() + + dist.barrier() + + @dist_init + def test_backward_accumulate_grads(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + t3 = torch.matmul(t1, t2) + # Run backward twice. + torch.autograd.backward([t3.sum()], retain_graph=True) + torch.autograd.backward([t3.sum()]) + + t3 = rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(t1, t2) + ) + # Run backward twice. + dist_autograd.backward(context_id, [t3.sum()], retain_graph=True) + dist_autograd.backward(context_id, [t3.sum()]) + + # Verify the gradients are same for local and remote execution. + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + @staticmethod + def _test_nested_backward_accumulate_grads(t1, t2, dst_rank): + return rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2)) + + @dist_init + def test_nested_backward_accumulate_grads(self): + self._nested_backward_accumulate_grads( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init + def test_multiple_backward(self): + self._multiple_backward( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init(clean_shutdown=False) + def test_multiple_backward_with_errors(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + f'worker{self._next_rank()}', + DistAutogradTest._python_udf_with_backward_error, + args=(t1, t2)).sum() + + try: + # Run backward in a loop multiple times. + for i in range(100): + if i < 50: + with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"): + dist_autograd.backward(context_id, [loss], retain_graph=True) + elif i > 50: + # Recovered from error. + dist_autograd.backward(context_id, [loss], retain_graph=True) + else: + dist.barrier() + SimulateBackwardError._simulate_error = False + dist.barrier() + finally: + # Sync before resetting flag. + dist.barrier() + + # Reset the flag. 
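+                # _simulate_error is a class-level flag on SimulateBackwardError, so
+                # restore it here to avoid leaking state into other tests.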
+ SimulateBackwardError._simulate_error = True + + @dist_init + def test_backward_verify_hooks(self): + t1 = torch.ones((3, 3), requires_grad=True) + # Double the gradient. + t1.register_hook(lambda grad: grad * 2) + t2 = torch.ones((3, 3), requires_grad=True) + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func(exec_mode, torch.matmul, t1, t2) + loss = ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_no_grad_copy(self): + ''' + Similar to test in test_autograd.py. + ''' + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp1, inp2): + return inp1 + inp2 + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad.data_ptr() + return grad, grad + + class MyFuncSingleGrad(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFuncSingleGrad.static_grad_ptr = grad.data_ptr() + return grad + + class NonContGradFunc(Function): + @staticmethod + def forward(ctx, inp1): + ctx.size = inp1.size() + return torch.tensor([1.]) + + @staticmethod + def backward(ctx, grad): + return torch.ones(1).expand(ctx.size) + + a = torch.randn(5, 6, requires_grad=True) + b = torch.randn(5, 6, requires_grad=True) + # non-contiguous grad should be copied + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))]) + grads = dist_autograd.get_gradients(context_id) + self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr) + self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr) + + # test case that should trigger no copy for a + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]]) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFuncSingleGrad.static_grad_ptr + p_a = grads[a].data_ptr() + # Verify there was no clone. + self.assertTrue(p_a == p_g) + + # Test case that should trigger copy for both of a,b. This is + # different in the distributed autograd case since we hold + # a reference to all grads in a vector until all accumulation is done. + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]]) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a].data_ptr() + p_b = grads[b].data_ptr() + # check a,b uses different grad buffer + self.assertFalse(p_a == p_b) + # both should be copied. + self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr) + self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr) + + @dist_init + def test_no_grad_copy_sparse(self): + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad._values().data_ptr() + return grad + + class NonContGradFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp1, inp2): + return inp1 + inp2 + + @staticmethod + def backward(ctx, grad): + # Create a sparse tensor with non-contiguous indices and values + # and return as grad. 
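+                # expand() produces views with stride 0 along the expanded dimension,
+                # so ni and nv below are non-contiguous; such a grad cannot be stolen
+                # and has to be copied during accumulation.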
+ v = torch.rand(1, 3) + i = torch.ones(1, 1, dtype=torch.long) + nv = v.expand(8, 3) + ni = i.expand(1, 8) + ngrad = torch.sparse_coo_tensor(ni, nv, (10, 3), dtype=torch.float32) + NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr() + return ngrad, ngrad + + a = torch.randn(10, 3, requires_grad=True) + b = torch.randn(10, 3, requires_grad=True) + input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) + offsets = torch.tensor([0, 4]) + import torch.nn.functional as F + + # test case that should trigger no copy for a. + with dist_autograd.context() as context_id: + emb_matrix = MyFunc.apply(a) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + # check a uses the same buffer + self.assertTrue(p_a == p_g) + + # Run backwards multiple times. + for i in range(10): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + # non-contiguous indices and value, we should trigger a copy. + with dist_autograd.context() as context_id: + emb_matrix = NonContGradFunc.apply(a, b) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = NonContGradFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + p_b = grads[b]._values().data_ptr() + # check a,b uses different grad buffer + self.assertFalse(p_a == p_b) + # Verify we cloned both grads. + self.assertFalse(p_a == p_g) + self.assertFalse(p_b == p_g) + + # Run backwards multiple times to verify accumulation. + for i in range(10): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + @dist_init + def test_grad_copy_sparse_indices_extra_ref(self): + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + static_grad_indices_ref = None + static_grad_values_ref = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad._values().data_ptr() + # indices() and values() return views, so holding onto + # references of them would not increment refcount of indices + # and values inside the sparse tensor. + MyFunc.static_grad_indices_ref = grad._indices() + MyFunc.static_grad_values_ref = grad._values() + return grad + + a = torch.randn(10, 3, requires_grad=True) + input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) + offsets = torch.tensor([0, 4]) + import torch.nn.functional as F + + with dist_autograd.context() as context_id: + emb_matrix = MyFunc.apply(a) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + self.assertIsNotNone(MyFunc.static_grad_indices_ref) + self.assertIsNotNone(MyFunc.static_grad_values_ref) + # grad would be stolen, since static_grad_indices_ref and + # static_grad_values_ref are holding onto views and don't bump the + # refcount. 
+ self.assertTrue(p_g == p_a) + + @dist_init + def test_post_hooks(self): + self.hook_called_times = 0 + + def post_hook_add_one(output_grads, input_grads): + self.hook_called_times += 1 + return output_grads + + def post_hook_add_two(output_grads, input_grads): + self.hook_called_times += 2 + return output_grads + + t = torch.rand(10, 10, requires_grad=True) + a = t + t + + # Register post hooks + accumulate_grad_0 = a.grad_fn.next_functions[0][0] + accumulate_grad_0.register_hook(post_hook_add_one) + accumulate_grad_0.register_hook(post_hook_add_two) + + accumulate_grad_1 = a.grad_fn.next_functions[1][0] + accumulate_grad_1.register_hook(post_hook_add_two) + + with dist_autograd.context() as context_id: + loss = a.sum() + dist_autograd.backward(context_id, [loss]) + self.assertEqual(5, self.hook_called_times) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(1, len(grads)) + self.assertTrue(t in grads) + + @staticmethod + def _slow_add(t1, t2): + time.sleep(1) + t3 = t1 + t2 + t3.requires_grad = True + return t3 + + @dist_init + def test_thread_local_context_id(self): + t1 = torch.rand((3, 3)) + t2 = torch.rand((3, 3)) + + t3 = t1 + t2 + t3.requires_grad = True + t3.sum().backward() + + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2)) + + with dist_autograd.context() as context_id: + loss = rref.to_here().sum() + # due to slow add, the continuation of this backward pass will be + # invoked by the previous rpc.remote thread which does not have a + # valid context_id. So, this can test whether we propagate + # thread_local states properly when jumping across threads on the + # server side. + dist_autograd.backward(context_id, [loss]) + self.assertTrue( + rpc.rpc_sync( + dst, + _compare_owner_value, + args=(context_id, rref, t3.grad) + ) + ) + + +class CudaDistAutogradTest(CommonDistAutogradTest): + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_simple(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + (t1 + t2).sum().backward() + with dist_autograd.context() as context_id: + t3 = t1 + t2 + dist_autograd.backward(context_id, [t3.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_to_cpu_continuation(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True) + # Run a few iterations. + for i in range(3): + t1.grad = None + t2.grad = None + # Root is CPU + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + t3 = self._exec_func(exec_mode, torch.add, t2, t2) + t4 = t3.cuda(0) + t1 + t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2) + t6 = t5.cuda(0) + t4 + t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5) + # Autograd graph consists of CPU -> GPU -> CPU execution. + ret = self._verify_backwards( + exec_mode, [t7.sum()], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_to_cpu_continuation_gpu_root(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True) + # Run a few iterations. 
+ for i in range(3): + t1.grad = None + t2.grad = None + # Root is CPU + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + t3 = self._exec_func(exec_mode, torch.add, t2, t2) + t4 = t3.cuda(0) + t1 + t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2) + t6 = t5.cuda(0) + t4 + # Autograd graph consists of CPU -> GPU -> CPU execution. + ret = self._verify_backwards( + exec_mode, [t6.sum()], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + +class FaultyAgentDistAutogradTest(RpcAgentTestFixture): + # Reusing a simplified helper function from DistAutogradTest to ensure + # autograd context is successfully cleaned up even when RPCs are failing. + def context_cleanup_test_helper(self, rpc_args, func): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # test that in dist autograd, in the case that tensors communicated over RPC do + # NOT require grad, we still cleanup the dist autograd contexts created + # on other nodes. This is because the autograd context is still + # communicated over RPC even if tensor arguments do not require grad, as + # it is possible that the response could. + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + + with dist_autograd.context() as context_id: + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # the thread's context id should be cleaned up + with self.assertRaises(RuntimeError): + dist_autograd._retrieve_context(context_id) + # Ensure all peers have finished mutating the + # `known_context_ids` set. + dist.barrier() + # check that all contexts have been cleaned up. + success = _all_contexts_cleaned_up() + self.assertTrue(success) + + # no faulty_messages defined so this fails all retryable messages - see + # faulty_rpc_agent_test_fixture.py for the list of retryable messages. + @dist_init + def test_context_cleanup_tensor_with_grad(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def test_verify_backend_options(self): + self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE) + self.assertEqual(self.rpc_backend_options.num_worker_threads, 8) + self.assertEqual(self.rpc_backend_options.num_fail_sends, 3) + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4) + + +class WrapperModule(nn.Module): + def __init__(self, model, device): + super().__init__() + self.model = model.to(device) + + def forward(self, *args): + return self.model(*args) + + def gradients(self, ctx_id): + grads = dist_autograd.get_gradients(ctx_id) + return [grads[p] for p in self.model.parameters()] + + +class TensorPipeCudaDistAutogradTest(RpcAgentTestFixture): + + @skip_if_lt_x_gpu(4) + def test_device_maps_backward_pass(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + + # The reverse of this device mapping should be used for the backward pass. 
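+        # For example, with self.rank == 0 this is equivalent to
+        #   options.set_device_map(worker_name(1), {0: 1})
+        # i.e. tensors sent from cuda:0 here arrive on cuda:1 at the callee, and
+        # gradients are mapped back 1 -> 0 so they land on the caller's device.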
+ options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + t1 = torch.rand(10, device=self.rank, requires_grad=True) + t2 = torch.rand(10, device=self.rank, requires_grad=True) + with dist_autograd.context() as context_id: + res = rpc.rpc_sync(dst, torch.add, args=(t1, t2)) + dist_autograd.backward(context_id, [res.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(torch.ones(10), grads[t1]) + self.assertEqual(torch.ones(10), grads[t2]) + self.assertEqual(t1.device, grads[t1].device) + self.assertEqual(t2.device, grads[t2].device) + + rpc.shutdown() + + class MyRemoteCompute(torch.nn.Module): + def forward(self, input): + input = input * 2.0 + return input + + class MyLocalCompute(torch.nn.Module): + def __init__(self, next_stage): + super().__init__() + self.next_stage = next_stage + + def forward(self, input): + return self.next_stage.rpc_sync().forward(input) + + @skip_if_lt_x_gpu(4) + def test_dist_autograd_sync_streams(self): + + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + + # The reverse of this device mapping should be used for the backward pass. + options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute) + local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute) + for _ in range(10): + input = torch.rand([1000, 10000], device=self.rank, requires_grad=True) + # Run local autograd + result = input * 2.0 + r = random.random() + loss = result.sum() * r + loss.backward() + + # Run distributed autograd + with dist_autograd.context() as context_id: + result = local_compute(input) + loss = result.sum() * r + dist_autograd.backward(context_id, [loss]) + + # Compare grads. 
+ grads = dist_autograd.get_gradients(context_id) + self.assertEqual(input.grad, grads[input]) + + rpc.shutdown() + + @skip_if_lt_x_gpu(4) + def test_gradients_synchronizations(self): + options = self.rpc_backend_options + for peer_rank in range(self.world_size): + options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 0: + # this is master + layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)] + local_layers = [l.to(0) for l in layers] + remote_layers = [] + for rank in range(1, self.world_size): + remote_layers.append(rpc.remote( + worker_name(rank), + WrapperModule, + args=(layers[rank - 1], rank) + )) + + x = torch.randn(5000, 2000).to(0) + # local iteration + local_model = nn.Sequential(*local_layers) + local_model(x).sum().backward() + + # remote iteration + with dist_autograd.context() as context_id: + for remote_layer in remote_layers: + x = remote_layer.rpc_sync().forward(x) + + dist_autograd.backward(context_id, [x.sum()]) + + futs = [] + for remote_layer in remote_layers: + futs.append(remote_layer.rpc_async().gradients(context_id)) + + for i in range(len(futs)): + local_gradients = [p.grad for p in local_layers[i].parameters()] + for g1, g2 in zip(futs[i].wait(), local_gradients): + self.assertEqual(g1, g2) + + rpc.shutdown() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c88eb8e479cd02a2d33511ee1ec2e6aab0c5657f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py @@ -0,0 +1,279 @@ + +import threading + +import torch +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +from torch import optim +from torch.distributed.optim import DistributedOptimizer +from torch.testing._internal.dist_utils import dist_init +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +class MyModule: + lock = threading.Lock() + + def __init__(self, requires_grad=True): + # cannot directly use torch.manual_seed(0) as all threads share the same + # default generator. The race from multiple RPC threads could mess up + # the draw order from the default RNG instance, leading to + # non-deterministic behavior. Hence, create a dedicated RNG here. + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=g_cpu) + + def forward(self, t1): + return torch.mm(self.w, t1) + + def get_w(self): + return self.w + + +class FailingOptimizer(optim.Optimizer): + def __init__(self, params): + super().__init__(params, {}) + + def step(self, closure=None): + raise ValueError("Error running optimizer.") + + +class OptimizerFailingOnConstructor(optim.Optimizer): + def __init__(self, params): + super().__init__(params, {}) + raise ValueError("Error creating optimizer.") + + def step(self, closure=None): + raise NotImplementedError + + +def _call_method(method, obj_rref, *args, **kwargs): + return method(obj_rref.local_value(), *args, **kwargs) + + +def remote_method(method, obj_rref, *args, **kwargs): + """ + Call rpc.remote on a method in a remote object. 
+ + Args: + method: the method (for example, Class.method) + obj_rref (RRef): remote reference to the object + args: positional arguments to pass to the method + kwargs: keyword arguments to pass to the method + + Returns a RRef to the remote method call result. + """ + return rpc.remote( + obj_rref.owner(), + _call_method, + args=[method, obj_rref] + list(args), + kwargs=kwargs, + ) + + +def rpc_async_method(method, obj_rref, *args, **kwargs): + """ + Call rpc.rpc_async on a method in a remote object. + + Args: + method: the method (for example, Class.method) + obj_rref (RRef): remote reference to the object + args: positional arguments to pass to the method + kwargs: keyword arguments to pass to the method + + Returns a Future to the method call result. + """ + return rpc.rpc_async( + obj_rref.owner(), + _call_method, + args=[method, obj_rref] + list(args), + kwargs=kwargs, + ) + + +class DistOptimizerTest(RpcAgentTestFixture): + @dist_init() + def test_dist_optim_exception(self): + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + dist_optim = DistributedOptimizer( + FailingOptimizer, [remote_param1, remote_param2] + ) + + with dist_autograd.context() as context_id: + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = rpc_async_method(MyModule.forward, remote_module1, t2) + output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait()) + loss = torch.add(output2.wait(), t1).sum() + + dist_autograd.backward(context_id, [loss]) + with self.assertRaisesRegex(Exception, "Error running optimizer"): + dist_optim.step(context_id) + + @dist_init() + def test_dist_optim_exception_on_constructor(self): + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + with self.assertRaisesRegex(Exception, "Error creating optimizer."): + dist_optim = DistributedOptimizer( + OptimizerFailingOnConstructor, [remote_param1, remote_param2] + ) + + def _test_dist_optim_base(self, optim_cls, *args, **kwargs): + # local version + module1 = MyModule() + module2 = MyModule() + params = [module1.get_w(), module2.get_w()] + local_optim = optim_cls(params, *args, **kwargs) + + old_w1 = module1.w.clone().detach() + old_w2 = module2.w.clone().detach() + + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = module1.forward(t2) + output2 = module2.forward(output1) + loss = torch.add(output2, t1).sum() + + loss.backward() + local_optim.step() + + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = 
remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + old_w1_remote = remote_param1.to_here() + + # sanity check: local and remote initial weights should match + self.assertEqual(old_w1, remote_param1.to_here()) + self.assertEqual(old_w2, remote_param2.to_here()) + + dist_optim = DistributedOptimizer( + optim_cls, [remote_param1, remote_param2], *args, **kwargs + ) + + with dist_autograd.context() as context_id: + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = rpc_async_method(MyModule.forward, remote_module1, t2) + output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait()) + loss = torch.add(output2.wait(), t1) + + dist_autograd.backward(context_id, [loss.sum()]) + dist_optim.step(context_id) + + new_w1 = rpc_async_method(MyModule.get_w, remote_module1).wait() + new_w2 = rpc_async_method(MyModule.get_w, remote_module2).wait() + + # ensure optimizer changed weights + self.assertNotEqual(old_w1, new_w1) + self.assertNotEqual(old_w2, new_w2) + # ensure local equals remote + self.assertEqual(new_w1, module1.get_w()) + self.assertEqual(new_w2, module2.get_w()) + + @dist_init() + def test_dist_optim(self): + self._test_dist_optim_base(optim.Adagrad, lr=0.05) + self._test_dist_optim_base(optim.Adam, lr=1e-2, amsgrad=True) + self._test_dist_optim_base(optim.AdamW, lr=0.05, amsgrad=True) + self._test_dist_optim_base(optim.SGD, lr=0.05) + self._test_dist_optim_base(optim.SGD, lr=1e-3, momentum=1, weight_decay=1, nesterov=True) + self._test_dist_optim_base(optim.Adadelta, rho=0.95) + self._test_dist_optim_base(optim.RMSprop, lr=0.05) + self._test_dist_optim_base(optim.Adamax, lr=0.05) + self._test_dist_optim_base(optim.Rprop, lr=0.05) + + def _test_dist_optim_none_grads(self, optim_cls, *args, **kwargs): + # local version + module1 = MyModule() + module2 = MyModule(requires_grad=False) + params = [module1.get_w(), module2.get_w()] + local_optim = optim_cls(params, *args, **kwargs) + + old_w1 = module1.w.clone().detach() + old_w2 = module2.w.clone().detach() + + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = module1.forward(t2) + output2 = module2.forward(output1) + loss = torch.add(output2, t1).sum() + + loss.backward() + local_optim.step() + + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule, args=(False,)) + remote_param1 = remote_module1.remote().get_w() + remote_param2 = remote_module2.remote().get_w() + + # sanity check: local and remote initial weights should match + self.assertEqual(old_w1, remote_param1.to_here()) + self.assertEqual(old_w2, remote_param2.to_here()) + + dist_optim = DistributedOptimizer( + optim_cls, [remote_param1, remote_param2], *args, **kwargs + ) + + with dist_autograd.context() as context_id: + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = remote_module1.rpc_async().forward(t2) + output2 = remote_module2.rpc_async().forward(output1.wait()) + loss = torch.add(output2.wait(), t1) + + dist_autograd.backward(context_id, [loss.sum()]) + 
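+            # step() applies, on each parameter owner, the gradients accumulated under this
+            # context_id; parameters that received no gradient (such as w2 here) are left
+            # unchanged, which the assertions below verify.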
dist_optim.step(context_id) + + new_w1 = remote_module1.rpc_async().get_w().wait() + new_w2 = remote_module2.rpc_async().get_w().wait() + + # ensure optimizer changed weights for w1 + self.assertNotEqual(old_w1, new_w1) + + # ensure optimizer not changed weights for w2 + self.assertEqual(old_w2, new_w2) + # ensure local equals remote + self.assertEqual(new_w1, module1.get_w()) + self.assertEqual(new_w2, module2.get_w()) + + @dist_init() + def test_dist_optim_none_grads(self): + self._test_dist_optim_none_grads(optim.SGD, lr=0.05) + self._test_dist_optim_none_grads(optim.RMSprop, lr=0.05) + self._test_dist_optim_none_grads(optim.Rprop, lr=0.05) + self._test_dist_optim_none_grads(optim.Adadelta, rho=0.95) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18d2cb91ad0e3e52830db2c557ebc23f9602ad39 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4de9ef0c261f59c326a00281d78951b1991b0df9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py @@ -0,0 +1,142 @@ +# If you need to modify this file to make this test pass, please also apply same edits accordingly to +# https://github.com/pytorch/examples/blob/master/distributed/rpc/batch/parameter_server.py +# and https://pytorch.org/tutorials/intermediate/rpc_async_execution.html#batch-updating-parameter-server + +import threading +from datetime import datetime +from time import perf_counter + +import torch +import torch.distributed.rpc as rpc +import torch.nn as nn +from torch import optim + +from torch.testing._internal.dist_utils import ( + dist_init, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture + +batch_size = 20 +in_features = 100 +out_features = 30 +num_batches = 4 + + +def timed_log(text): + print(f"{datetime.now().strftime('%H:%M:%S')} {text}") + + +class BatchUpdateParameterServer: + + def __init__(self, batch_update_size): + self.model = nn.Linear(in_features, out_features) + self.lock = threading.Lock() + self.future_model = torch.futures.Future() + self.batch_update_size = batch_update_size + self.curr_update_size = 0 + self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9) + for p in self.model.parameters(): + p.grad = torch.zeros_like(p) + + def get_model(self): + return self.model + + @staticmethod + @rpc.functions.async_execution + def update_and_fetch_model(ps_rref, grads): + self = ps_rref.local_value() + for p, g in 
zip(self.model.parameters(), grads): + if p.grad is None: + p.grad = g + else: + p.grad += g + with self.lock: + timed_log(f"PS got {self.curr_update_size}/{self.batch_update_size} updates") + self.curr_update_size += 1 + fut = self.future_model + + if self.curr_update_size >= self.batch_update_size: + for p in self.model.parameters(): + p.grad /= self.batch_update_size + self.curr_update_size = 0 + self.optimizer.step() + self.optimizer.zero_grad() + fut.set_result(self.model) + timed_log("PS updated model") + self.future_model = torch.futures.Future() + + return fut + + +class Trainer: + + def __init__(self, ps_rref): + self.ps_rref = ps_rref + self.loss_fn = nn.L1Loss() + + def get_next_batch(self): + for _ in range(num_batches): + inputs = torch.randn(batch_size, in_features) + labels = torch.zeros(batch_size, out_features) + yield inputs, labels + + def train(self): + name = rpc.get_worker_info().name + m = self.ps_rref.rpc_sync().get_model() + for inputs, labels in self.get_next_batch(): + timed_log(f"{name} processing one batch") + self.loss_fn(m(inputs), labels).backward() + timed_log(f"{name} reporting grads") + m = rpc.rpc_sync( + self.ps_rref.owner(), + BatchUpdateParameterServer.update_and_fetch_model, + args=(self.ps_rref, [p.grad for p in m.cpu().parameters()]), + ) + timed_log(f"{name} got updated model") + + +def run_trainer(ps_rref): + trainer = Trainer(ps_rref) + trainer.train() + + +def run_ps(trainers): + timed_log("Start training") + start = perf_counter() + ps_rref = rpc.RRef(BatchUpdateParameterServer(len(trainers))) + futs = [] + for trainer in trainers: + futs.append( + rpc.rpc_async(trainer, run_trainer, args=(ps_rref,)) + ) + + torch.futures.wait_all(futs) + stop = perf_counter() + timed_log("Finish training") + timed_log(f"Time spent training: {stop-start}s") + +class ParameterServerTest(RpcAgentTestFixture): + + @dist_init(setup_rpc=False) + def test_batch_updating_parameter_server(self): + + if self.rank != 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + else: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + run_ps([f"{worker_name(r)}" for r in range(1, self.world_size)]) + + rpc.shutdown() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b7683064dcfd131d126dfd0fbefa4b4acb4e00a4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py @@ -0,0 +1,324 @@ +import torch +import time +import torch.distributed.rpc as rpc +from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs +from torch.testing._internal.dist_utils import ( + dist_init, + wait_until_pending_futures_and_users_flushed, + wait_until_owners_and_forks_on_rank, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +def my_sleep_func(seconds=1): + time.sleep(seconds) + return torch.mul(torch.tensor(1), torch.tensor(1)) + +@torch.jit.script +def my_script_func(tensor): + return torch.add(tensor, tensor) + +def add_rref_to_value(rref, value): + 
return rref.to_here() + value + +class FaultyAgentRpcTest(RpcAgentTestFixture): + + # no faulty_messages defined so this fails all retryable messages - see + # faulty_rpc_agent_test_fixture.py for the list of retryable messages. + @dist_init(messages_to_delay={}) + def test_check_failed_messages(self): + if self.rank == 0: + dst_worker_b = worker_name((self.rank + 1) % self.world_size) + dst_worker_c = worker_name((self.rank + 2) % self.world_size) + + # Worker0 sends RPC to Worker1 and creates an RRef there + rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2))) + # Worker0 sends an RPC to Worker2 with the RRef as an arg + rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2))) + # check if the output is as expected + self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2))) + # explicitly delete all User RRefs + _delete_all_user_and_unforked_owner_rrefs() + + @dist_init + def test_verify_backend_options(self): + self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE) + self.assertEqual(self.rpc_backend_options.num_worker_threads, 8) + self.assertEqual(self.rpc_backend_options.num_fail_sends, 3) + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4) + self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2) + self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]) + def test_custom_faulty_messages(self): + self.assertEqual( + {"RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"}, + set(self.rpc_backend_options.messages_to_fail), + ) + + @dist_init(faulty_messages=[]) + def test_no_faulty_messages(self): + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0) + + @dist_init(messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_custom_messages_to_delay(self): + self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5}) + + def _test_remote_message_dropped_pickle(self, dst=None): + if self.rank != 0: + return + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # Since we fail python_remote_call messages synchronously, the future + # corresponding to this remote call will be marked with an error when + # this function returns. + rref = rpc.remote(dst_worker, my_sleep_func, args=(1,)) + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + # Attempt to fork the RRef should raise an error indicating the rpc.remote timeout. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref._serialize() + # Test that using RRef as arg over RPC (which forks) results in the same + # error + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1)) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_remote_message_dropped_pickle(self): + self._test_remote_message_dropped_pickle() + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_remote_message_dropped_pickle_to_self(self): + self._test_remote_message_dropped_pickle(self.rank) + + + def _test_remote_message_dropped_timeout(self, func, args, dst=None): + if self.rank != 0: + return + + # test the case where rpc.remote() message creation is completely dropped. 
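+        # With the *_REMOTE_CALL message type configured to fail, the creation message never
+        # reaches the destination, so no OwnerRRef is ever created there and using the RRef
+        # below must surface an error.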
+ dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # Since we fail python_remote_call messages synchronously, the future + # corresponding to this remote call will be marked with an error when + # this function returns. + rref = rpc.remote(dst_worker, func, args=args) + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + # Note: during shutdown, logs will indicate "Could not find OwnerRRef..." + # on the owning nodes, this is expected because the OwnerRRef was never + # successfully created. Therefore, delAllUsers will work as expected. + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_builtin_remote_message_dropped_timeout(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_dropped_timeout(func, args) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_builtin_remote_message_dropped_timeout_to_self(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_dropped_timeout(func, args, dst=0) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_udf_remote_message_dropped_timeout(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_dropped_timeout(func, args) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_udf_remote_message_dropped_timeout_to_self(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_dropped_timeout(func, args, dst=0) + + def _test_remote_message_delay_timeout(self, func, args, dst=None): + if self.rank != 0: + return + # Test the case where remote message is eventually processed on the owner, + # but the future on the creator times out before the response comes back. + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # 10 ms timeout + rref = rpc.remote(dst_worker, func, args=args, timeout=0.001) + # Future corresponding to the remote creation should time out. + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref._get_future().wait() + + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + # to_here() should now pick up that rpc.remote() creation has failed. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + + # Test the case where rpc.remote() times out, but to_here() has already + # started blocking before. + # NOTE: we only test this when not sending to self, as to_here() calls + # calls localValue(), which does not send an RPC and thus does not have + # a timeout. This can be supported by allowing future.wait() to + # take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280) + if dst_rank != self.rank: + slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2) + + with self.assertRaisesRegex(RuntimeError, expected_error): + # to_here() should raise timeout error, since it does not know about the + # status of rpc.remote(). + slow_rref.to_here(0.001) + # Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete + # but this can be a noop since it may not exist on the owner yet. Later, + # the owner can process the RRef creation and wait for the delete message, + # thus leading to a timeout. 
+ # Therefore, we wait until we get notification that pending owners have + # been confirmed before sending out RRefUserDeletes. + if dst_rank != self.rank: + wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank) + + @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2}) + def test_udf_remote_message_delay_timeout(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_delay_timeout(func, args) + + @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2}) + def test_udf_remote_message_delay_timeout_to_self(self): + func = my_sleep_func + args = (1,) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_builtin_delay_timeout(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_delay_timeout(func, args) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_builtin_delay_timeout_to_self(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_script_delay_timeout(self): + func = my_script_func + args = (torch.tensor(1),) + self._test_remote_message_delay_timeout(func, args) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_script_delay_timeout_to_self(self): + func = my_script_func + args = (torch.tensor(1),) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1}) + def test_rref_to_here_timeout(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref.to_here(0.01) + + rref.to_here() + + @dist_init(faulty_messages=[]) + def test_rpc_builtin_timeout(self): + next_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(next_rank) + expected_error = self.get_timeout_error_regex() + # PYTHON_CALL message types which correspond to Python UDF over RPC + # by default get a delay (see faulty_rpc_agent_test_fixture) + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync( + dst_worker, + torch.add, + args=(torch.tensor(1), torch.tensor(1)), + timeout=1, + ) + + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1 + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure that the currently set default timeout is large enough such + # that RPCs with delays still complete. 
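+        # (the injected SCRIPT_CALL delay is 1.5s, well below the default RPC timeout)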
+ fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + fut.wait() + + # Ensure timeout if we set a new default and don't override + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if we specify timeout of 0 + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0 + ) + fut.wait() + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_rpc_script_timeout(self): + next_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(next_rank) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1) + + fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure that the currently set default timeout is large enough such + # that RPCs with delays still complete. + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),) + ) + fut.wait() + + # Ensure timeout if we set a new default and don't override + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),) + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if we specify timeout of 0 + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0 + ) + fut.wait() + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..af73fef4794b06009c6c08b66bb9aa01b59f9448 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py @@ -0,0 +1,60 @@ +import torch.distributed.rpc as rpc +import torch.distributed.rpc._testing # noqa: F401 +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +# The following message types are currently retried in the RREF protocol and +# distributed autograd. Thus only these messages should be tested with the +# Faulty RPC Agent. +retryable_message_types = ["RREF_FORK_REQUEST", + "RREF_CHILD_ACCEPT", + "RREF_USER_DELETE", + "CLEANUP_AUTOGRAD_CONTEXT_REQ"] + +# The following messages incur the corresponding delay in seconds while being +# processed in FaultyTensorPipeAgent's enqueueSend() function. 
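+# Builtin operators (e.g. torch.add) travel as SCRIPT_CALL messages, so they are subject to
+# the same delay as TorchScript calls.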
+default_messages_to_delay = { + "PYTHON_CALL": 1.5, # Python UDF + "SCRIPT_CALL": 1.5, # Script/Builtin +} + +class FaultyRpcAgentTestFixture(RpcAgentTestFixture): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.messages_to_fail = retryable_message_types + self.messages_to_delay = default_messages_to_delay + + @property + def rpc_backend(self): + return rpc.backend_registry.BackendType[ + "FAULTY_TENSORPIPE" + ] + + @property + def rpc_backend_options(self): + return rpc.backend_registry.construct_rpc_backend_options( + self.rpc_backend, + init_method=self.init_method, + num_worker_threads=8, + num_fail_sends=3, + messages_to_fail=self.messages_to_fail, + messages_to_delay=self.messages_to_delay, + ) + + def setup_fault_injection(self, faulty_messages, messages_to_delay): + if faulty_messages is not None: + self.messages_to_fail = faulty_messages + if messages_to_delay is not None: + self.messages_to_delay = messages_to_delay + + def get_shutdown_error_regex(self): + error_regexes = [ + "Exception in thread pool task", + "Connection reset by peer", + "Connection closed by peer" + ] + return "|".join([f"({error_str})" for error_str in error_regexes]) + + def get_timeout_error_regex(self): + return "RPC ran for more than" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..e819613592299ed46570aca001e3a55479972a9a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py @@ -0,0 +1,64 @@ +import os +from abc import ABC, abstractmethod + +import torch.testing._internal.dist_utils + + +class RpcAgentTestFixture(ABC): + @property + def world_size(self) -> int: + return 4 + + @property + def init_method(self): + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + master_addr = os.environ["MASTER_ADDR"] + master_port = os.environ["MASTER_PORT"] + return f"tcp://{master_addr}:{master_port}" + else: + return self.file_init_method + + @property + def file_init_method(self): + return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format( + file_name=self.file_name + ) + + @property + @abstractmethod + def rpc_backend(self): + pass + + @property + @abstractmethod + def rpc_backend_options(self): + pass + + def setup_fault_injection(self, faulty_messages, messages_to_delay): # noqa: B027 + """Method used by dist_init to prepare the faulty agent. + + Does nothing for other agents. + """ + pass + + # Shutdown sequence is not well defined, so we may see any of the following + # errors when running tests that simulate errors via a shutdown on the + # remote end. + @abstractmethod + def get_shutdown_error_regex(self): + """ + Return various error message we may see from RPC agents while running + tests that check for failures. This function is used to match against + possible errors to ensure failures were raised properly. + """ + pass + + @abstractmethod + def get_timeout_error_regex(self): + """ + Returns a partial string indicating the error we should receive when an + RPC has timed out. Useful for use with assertRaisesRegex() to ensure we + have the right errors during timeout. 
+ """ + pass diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..d1e93ddb4ae6ef435552299091c684b5a91d9941 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py @@ -0,0 +1,6493 @@ +import concurrent.futures +import contextlib +import json +import os +import sys +import threading +import time + +from collections import namedtuple +from functools import partial +from threading import Event +from threading import Lock +from unittest import mock + +import torch +import torch.nn as nn +import torch.distributed as dist +import torch.distributed.rpc as rpc +import torch.distributed.autograd as dist_autograd +from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info, WorkerInfo +from torch.distributed.rpc.api import _use_rpc_pickler, _thread_local_var, _wait_all +from torch.distributed.rpc.internal import ( + PythonUDF, + RPCExecMode, + _internal_rpc_pickler, + _build_rpc_profiling_key, +) +from torch.futures import Future +from torch.testing._internal.common_distributed import ( + skip_if_lt_x_gpu, + captured_output, + tp_transports, +) +from torch.testing._internal.common_utils import ( + IS_MACOS, + load_tests, + skip_but_pass_in_sandcastle_if, + get_cycles_per_ms, +) + +from torch.testing._internal.dist_utils import ( + dist_init, + get_function_event, + initialize_pg, + wait_until_node_failure, + wait_until_pending_futures_and_users_flushed, + wait_until_owners_and_forks_on_rank, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_utils import TemporaryFileName + +from torch.autograd.profiler_legacy import profile as _profile + + +def foo_add(): + return torch.add(torch.ones(1), torch.ones(1)) + +def udf_with_torch_ops(device=-1, use_record_function=False): + device_ctx = contextlib.nullcontext() if device == -1 else torch.cuda.device(device) + record_function_ctx = ( + torch.autograd.profiler.record_function("##forward##") + if use_record_function + else contextlib.nullcontext() + ) + with device_ctx, record_function_ctx: + t1, t2 = torch.ones(1), torch.ones(1) + t = torch.add(t1, t2) + t = torch.mul(t, t) + t = t.relu() + t = t.sigmoid() + +# Events (operator invocations) that are expected to be ran as part of the above +# function. +EXPECTED_REMOTE_EVENTS = [ + "aten::ones", + "aten::ones", + "aten::add", + "aten::mul", + "aten::relu", + "aten::clamp_min", + "aten::sigmoid", +] + +# Remote operations are prefixed with the following string for RPC profiling. +REMOTE_OP_STR = "#remote_op: " + + +VALUE_FUTURE = concurrent.futures.Future() +DONE_FUTURE = concurrent.futures.Future() + +FIFTY_MIL_CYCLES = 50000000 + +_rpc_barrier_count = 0 + +def _increment_count(): + global _rpc_barrier_count + _rpc_barrier_count += 1 + +def _reset_count(): + global _rpc_barrier_count + _rpc_barrier_count = 0 + +class StubRpcAgent: + def __init__(self, world_size): + self.world_size = world_size + + def get_worker_infos(self): + return { + WorkerInfo(name=worker_name(rank), id=rank) + for rank in range(self.world_size) + } + + +def _stub_construct_rpc_backend_options_handler(**kwargs): + return mock.Mock() # RpcBackendOptions. 
+ + +def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options): + return StubRpcAgent(world_size=world_size) + + +def set_value(value): + VALUE_FUTURE.set_result(value) + + +def wait_for_value_future(): + return VALUE_FUTURE.result() + + +def set_and_check_done(value): + VALUE_FUTURE.set_result(value) + return DONE_FUTURE.result() + + +# it is used to test python user defined function over rpc +# classes and functions are used to test python user defined class and +# methods over rpc +TensorClass = namedtuple("TensorClass", ["tensors"]) + +class MyPickleClass: + def __init__(self): + self.t = None + + def __getstate__(self): + (pickled_python_udf, tensors) = _internal_rpc_pickler.serialize( + PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None) + ) + return (pickled_python_udf, tensors) + + def __setstate__(self, obj): + python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1]) + result = python_udf.func(python_udf.args[0], python_udf.args[1]) + self.t = result + + def set(self, val): + self.t = val + + +class SlowPickleClass: + def __init__(self, t): + self.t = t + + def __getstate__(self): + time.sleep(self.t) + return (self.t, ) + + def __setstate__(self, obj): + self.t = obj[0] + time.sleep(self.t) + + +class MyClass: + def __init__(self, a, delay=False): + self.a = a + # delay initialization to simulate errors if specified + if delay: + time.sleep(2) + + def my_instance_method(self, b): + return self.a + b + + @classmethod + def my_class_method(cls, d, e): + return d + e + + @staticmethod + def my_static_method(f): + return f > 10 + + def increment_value(self, increment): + self.a += increment + + def get_value(self): + return self.a + + def my_slow_method(self, my_tensor_arg): + time.sleep(5) + return torch.add(self.a, my_tensor_arg) + + +def _call_method_on_rref(method, rref, *args, **kwargs): + return method(rref.local_value(), *args, **kwargs) + + +def get_rref_list(values): + return [RRef(MyClass(a)) for a in values] + + +def add_rref_to_value(rref, value): + return rref.to_here() + value + + +def run_nested_pickle(pickle_cls_instance, tensor): + return pickle_cls_instance.t + tensor + +def build_sparse_tensor(coalesce=False): + i = [[0, 1, 1], [2, 0, 2]] + v = [3, 4, 5] + tensor = torch.sparse_coo_tensor(i, v, (2, 3)) + if coalesce: + tensor = tensor.coalesce() + return tensor + +def build_complex_tensors(): + a = torch.ones(3, 3) + b = [a, a] + c = [b, b] + d = [a, b] + e = {a: d} + return [a, b, c, d, e] + +def non_cont_test(t_view, t_cont): + if t_view.is_contiguous(): + raise Exception('t_view is contiguous!') + if not t_cont.is_contiguous(): + raise Exception('t_cont is not contiguous!') + if not torch.equal(t_view, t_cont): + raise Exception('t_view is not equal to t_cont!') + return t_view + +def my_function(a, b, c): + return a + b + c + + +def my_tensor_function(a, b): + return a + b + +def my_container_sum(a): + result = a[0] + for tensor in a[1:]: + result += tensor + return result + + +def my_sleep_func(seconds=1): + time.sleep(seconds) + return torch.mul(torch.tensor(1), torch.tensor(1)) + + +def my_complex_tensor_function(list_input, tensor_class_input, dict_input): + res = list_input[0] + for t in list_input: + res += t + for v in dict_input.values(): + res += v + complex_tensors = tensor_class_input.tensors + return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2]) + + +def my_rref_function(rref_a, rref_b): + return rref_a.to_here() + rref_b.to_here() + + +def delayed_add(a, b, seconds=0.05): 
+ time.sleep(seconds) + return a + b + + +def identity(a): + return a + +def no_result(): + print("do nothing") + +def raise_or_inc(value): + if value.numel() == 2: + raise ValueError("Expected error") + return value + 1 + +def nested_rpc(dst): + return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +def nested_rpc_sparse(dst): + return rpc.rpc_sync( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ) + + +def multi_layer_nested_async_rpc(dst, world_size, ttl): + # this method returns immediately without blocking the callee, but will + # generate additional requests. + if ttl > 0: + current_dst = worker_name(dst) + next_dst = (dst + 1) % world_size + rpc.rpc_async( + current_dst, + multi_layer_nested_async_rpc, + args=(next_dst, world_size, ttl - 1), + ) + return 0 + + +def nested_rref(dst): + return ( + rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)), + rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)), + ) + + +def nested_rref_sparse(dst): + return ( + rpc.remote( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ), + rpc.remote( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ), + ) + + +def nested_remote(dst): + rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3)) + return rref.to_here() + +def nested_remote_sparse(dst): + rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor())) + return rref.to_here() + + +def rref_forward_chain(dst, world_size, rref, ttl): + if ttl > 0: + current_dst = worker_name(dst) + next_dst = (dst + 1) % world_size + ret_rref = rpc.remote( + current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1) + ) + return [ret_rref] + else: + return rref.to_here() + + +def rpc_return_rref(dst): + return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +def light_rpc(): + return 0 + + +def heavy_rpc(tensor): + for i in range(1, 100): + tensor *= i + tensor /= i + 1 + return 0 + + +def heavy_rpc_sparse(tensor): + for i in range(1, 100): + tensor *= i + tensor = tensor / (i + 1) + return 0 + +@torch.jit.script +def heavy_rpc_torchscript(tensor): + for i in range(1, 100): + tensor *= i + tensor /= i + 1 + return 0 + + +@torch.jit.script +def my_script_func(tensor): + return torch.add(tensor, tensor) + + +expected_err = "Expected error" + +# Note that it needs to inherit from Exception, not BaseException. 
See comment +# in rpc/internal.py +class CustomException(Exception): + def __init__(self, bool, msg): + self.bool = bool + super().__init__(msg) + +def raise_func(): + raise ValueError(expected_err) + +def custom_raise_func(): + raise CustomException(True, "foo") + +@torch.jit.script +def raise_func_script(expected_err: str) -> torch.Tensor: + raise ValueError(expected_err) + +expected_err_escape = "\nFirst line of error \n next line of error \n last line of error" +def raise_func_escape(): + raise ValueError(expected_err_escape) + + +global_rref = None + + +def set_global_rref(rref): + global global_rref + global_rref = rref + + +def clear_global_rref(): + global global_rref + global_rref = None + + +def check_rref_confirmed(rref): + return rref.confirmed_by_owner() + + +def get_rref_debug_info(): + return _rref_context_get_debug_info() + + +def add_use_future_cb(to, x, y, z): + out = concurrent.futures.Future() + + def callback(fut): + out.set_result(fut.wait() + z) + + fut = rpc.rpc_async(to, torch.add, args=(x, y)) + fut.then(callback) + return out.result() + + +def get_events_from_profile(profile_rref): + return profile_rref.local_value().process_global_function_events + + +def add_use_future_set_result(to, x, y, z): + out = torch.futures.Future() + fut = rpc.rpc_async(to, torch.add, args=(x, y)) + fut.then(lambda fut : out.set_result(fut.wait() + z)) + return out.wait() + + +def add_use_future_nested_cb(to, x, y, z): + out = torch.futures.Future() + + def callback(fut1): + fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z)) + fut2.then(lambda fut2 : out.set_result(fut2.wait())) + + fut1 = rpc.rpc_async(to, torch.add, args=(x, y)) + fut1.then(callback) + return out.wait() + + +def fail_on_fut(fut): + pass + + +@rpc.functions.async_execution +def async_raise_func(): + raise RuntimeError("Expected error") + + +@rpc.functions.async_execution +def async_wrong_type(): + return torch.zeros(2, 2) + + +@rpc.functions.async_execution +def async_add(to, x, y): + return rpc.rpc_async(to, torch.add, args=(x, y)) + + +def slow_add(x, y, device="cpu"): + time.sleep(1) + x = x.to(device) + y = y.to(device) + return torch.add(x, y).cpu() + + +@rpc.functions.async_execution +def slow_async_add(to, x, y, device="cpu"): + return rpc.rpc_async(to, slow_add, args=(x, y, device)) + + +@rpc.functions.async_execution +def async_add_with_future_ctor(to, x, y, z): + fut = torch.futures.Future() + rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut1: fut.set_result(fut1.wait() + z) + ) + return fut + + +@rpc.functions.async_execution +def async_add_chained(to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + +@rpc.functions.async_execution +def async_add_chained_multi(to, x, num, step): + fut = rpc.rpc_async(to, torch.add, args=(x, 0)) + for _ in range(num): + fut = fut.then(lambda fut: fut.wait() + step) + return fut + + +@rpc.functions.async_execution +def async_add_nested(to, x, y, z): + return rpc.rpc_async(to, async_add, args=(to, x, y)).then( + lambda fut: fut.wait() + z + ) + + +@rpc.functions.async_execution +def async_add_multi_fanout(to, x, num, step): + futs = [] + for i in range(num): + if i == 0: + futs.append(rpc.rpc_async(to, torch.add, args=(x, step))) + else: + futs.append(rpc.rpc_async(to, torch.add, args=(0, step))) + + # TODO: use torch.futures.collect_all + lock = Lock() + state = {"cnt": 0, "ret": torch.zeros_like(x)} + ret_future = torch.futures.Future() + + def inc_and_set(fut): + with lock: + state["cnt"] += 1 + 
state["ret"] += fut.wait() + if state["cnt"] >= len(futs): + ret_future.set_result(state["ret"]) + + for fut in futs: + fut.then(inc_and_set) + + return ret_future + + +@rpc.functions.async_execution +def async_cuda_sleep_and_set_to_one(t): + device = t.device + original_stream = torch.cuda.current_stream(device) + new_stream = torch.cuda.Stream(device) + new_stream.wait_stream(original_stream) + with torch.cuda.stream(new_stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + t.fill_(1) + fut = Future(devices=[device]) + fut.set_result(t) + return fut + + +@rpc.functions.async_execution +def async_cuda_nested_add(to, x, y, z): + def cb(fut): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + return fut.value() + z + + return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb) + + +# A custom Python class that contains a tensor, needed to see if we correctly +# use the Python pickler to extract tensors from non-IValue-convertible types. +class TensorWrapper: + __slots__ = ("tensor", "lock", "event", "thread") + + def __init__(self, t): + self.tensor = t + # Add one non-picklable field, to ensure it's ignored/skipped. + self.lock = Lock() + self.event = torch.cuda.Event(enable_timing=True) + self.thread = threading.Thread() + self.thread.start() + + def increase(self, v): + with self.lock: + self.tensor += v + + def sum(self): + with self.lock: + self.event.record() + return self.tensor.sum() + + +class AsyncExecutionClass: + + @staticmethod + @rpc.functions.async_execution + def static_async_add(to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + @classmethod + @rpc.functions.async_execution + def class_async_add(cls, to, x, y, z): + ret_fut = torch.futures.Future() + rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: ret_fut.set_result(fut.wait() + z) + ) + return ret_fut + + @rpc.functions.async_execution + def bound_async_add(self, to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + +def return_future(): + return torch.futures.Future() + + +class FooBackendOptions(rpc.RpcBackendOptions): + def __init__(self, init_method): + # Must call the __init__ of the superclass (and do so directly, + # without using super()) because... pybind. + rpc.RpcBackendOptions.__init__(self) + self.init_method = init_method + + +# load_tests from common_utils is used to automatically filter tests for +# sharding on sandcastle. 
This line silences flake warnings +load_tests = load_tests + + +class MyEmbeddingBagModel(torch.nn.Module): + def __init__(self, sparse): + super().__init__() + self.eb = torch.nn.EmbeddingBag( + 10, + 10, + sparse=sparse + ) + + def forward(self, x): + return self.eb(x) + + +class MyParameterServer: + def __init__(self, trainers): + self.lock = Lock() + self.trainers = trainers + self.iteration = 0 + self.updates = 0 + self.futures = [] + self.total = None + self.gradient = None + + @staticmethod + def get_gradient(rref): + return rref.local_value().gradient + + @staticmethod + @rpc.functions.async_execution + def average(rref, riteration, tensor): + self = rref.local_value() + fut = torch.futures.Future() + with self.lock: + if riteration > self.iteration: + self.iteration = riteration + self.updates = 0 + self.futures.clear() + self.futures.append(fut) + if self.total is None: + self.total = tensor + else: + self.total += tensor + self.updates += 1 + if self.trainers == self.updates: + self.gradient = self.total / float(self.trainers) + for fut in self.futures: + result = self.total / float(self.trainers) + fut.set_result(result) + return fut + + +class MyConvNetForMNIST(nn.Module): + def __init__(self, device): + super().__init__() + self.net = nn.Sequential( + nn.Conv2d(1, 16, 3, 1), + nn.ReLU(), + nn.Conv2d(16, 32, 3, 1), + nn.ReLU(), + nn.MaxPool2d(2), + nn.Flatten(1), + nn.Linear(4608, 128), + nn.ReLU(), + nn.Linear(128, 10), + ).to(device) + self.device = device + + def forward(self, x, is_rref=False): + x = x.to_here() if is_rref else x + with torch.cuda.stream(torch.cuda.current_stream(self.device)): + # intentionally adding delay to current CUDA stream + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + return self.net(x) + + def __getstate__(self): + # return an empty dict to avoid inspecting the model contents on the + # owner + return {} + + +class RpcTestCommon: + def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None): + if mode == RPCExecMode.SYNC: + return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs) + elif mode == RPCExecMode.ASYNC: + return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait() + elif mode == RPCExecMode.REMOTE: + return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here() + + def _self_py_udf_remote(self, worker_info, x, y, z): + rref = rpc.remote(worker_info, my_function, args=(x, y, z)) + self.assertEqual(rref.to_here(), x + y + z) + + def _self_remote_rref_as_rpc_arg(self, dst, x, y, z): + self_worker_info = rpc.get_worker_info() + rref = rpc.remote(self_worker_info, my_function, args=(x, y, z)) + fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x)) + ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y)) + self.assertEqual(ret, x + y + z + x + y) + self.assertEqual(fut.wait(), x + y + z + x) + + def _self_remote_rref_as_remote_arg(self, dst, x, y, z): + self_worker_info = rpc.get_worker_info() + rref = rpc.remote(self_worker_info, my_function, args=(x, y, z)) + ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x)) + self.assertEqual( + ret_rref.to_here(), x + y + z + x + ) + + def _world_size_one(self, a, b): + if self.rank == 0: + rpc.init_rpc( + name="me", + backend=self.rpc_backend, + rank=0, + world_size=1, + rpc_backend_options=self.rpc_backend_options, + ) + + def _rpc_sync(x, y): + expect = x * 2 + result = rpc.rpc_sync( + "me", + my_tensor_function, + args=(x, y) + ) + self.assertEqual(expect, result) + + def _rpc_async(x, y): + expect = x * 2 + result = rpc.rpc_async( + "me", + my_tensor_function, + args=(x, 
y) + ).wait() + self.assertEqual(expect, result) + + def _remote(x, y): + expect = x * 2 + result = rpc.remote( + "me", + my_tensor_function, + args=(x, y) + ).to_here() + self.assertEqual(expect, result) + + _rpc_sync(a, b) + _rpc_async(a, b) + _remote(a, b) + + rpc.shutdown() + + def _multi_rpc(self, sparse): + dst_rank = (self.rank + 1) % self.world_size + for i in range(20): + n = i + self.rank + 1 + if sparse: + x = build_sparse_tensor() * n + y = build_sparse_tensor() * n + else: + x = torch.ones(2, 2) + y = torch.ones(2, 2) + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(x, y), + ) + self.assertEqual(ret, x * 2) + + def _run_uneven_workload(self, f, x, num_repeat=30): + # worker0 drives and waits for worker1 and worker2 + # throughout the test. + if self.rank == 0: + self.assertTrue(self.world_size >= 3) + + # Phase 1: Only worker1 has workload. + dst = "worker1" + futs = [] + for _ in range(num_repeat): + fut = rpc.rpc_async(dst, f, args=(x,)) + futs.append(fut) + + for fut in torch.futures.collect_all(futs).wait(): + self.assertEqual(fut.wait(), 0) + + # Phase 2: Only worker2 has workload. + # If join is not correctly implemented, + # worker2 should be closed by now. + dst = "worker2" + futs = [] + for _ in range(num_repeat): + fut = rpc.rpc_async(dst, f, args=(x,)) + futs.append(fut) + + for val in torch.futures.wait_all(futs): + self.assertEqual(val, 0) + + def _wait_all_workers(self, f, x): + initialize_pg(self.file_init_method, self.rank, self.world_size) + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + self._run_uneven_workload(f, x) + + # worker0 calls this at the end after waiting for RPC responses. + # worker1/2 calls this immediately and has some works after it. + # worker3 calls this immediately and has no more work. + rpc.api._wait_all_workers() + + # Wait before proceeding to shutdown to ensure worker0 RPCs make + # it through to other workers. + dist.barrier() + rpc.shutdown(graceful=False) + + def _wait_all_workers_twice(self, f, x): + initialize_pg(self.file_init_method, self.rank, self.world_size) + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + self._run_uneven_workload(f, x) + + # worker0 calls this at the end after waiting for RPC responses. + # worker1/2 calls this immediately and has some works after it. + # worker3 calls this immediately and has no more work. + rpc.api._wait_all_workers() + rpc.api._wait_all_workers() + + # Wait before proceeding to shutdown to ensure worker0 RPCs make + # it through to other workers. + dist.barrier() + rpc.shutdown(graceful=False) + + def _nested_rpc(self, f, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + f, + args=(worker_name(self.rank),), + ) + self.assertEqual(ret, expected) + + def _stress_test_rpc(self, f, repeat=1000, args=()): + n = self.rank + 1 + dst_rank = n % self.world_size + futs = [] + tik = time.time() + for _ in range(repeat): + fut = rpc.rpc_async(worker_name(dst_rank), f, args=args) + futs.append(fut) + + for val in torch.futures.wait_all(futs): + self.assertEqual(val, 0) + tok = time.time() + print( + f"Rank {self.rank} finished testing {repeat} times in {tok - tik} seconds." 
+ ) + + def _builtin_remote_ret(self, x, y, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + torch.add, + args=(x, y), + ) + self.assertEqual(rref.to_here(), expected) + + def _builtin_remote_self(self, x, y, expected): + rref = rpc.remote( + worker_name(self.rank), + torch.add, + args=(x, y), + ) + self.assertEqual(rref.local_value(), expected) + + def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}): + m = 10 + n = self.rank + 1 + dst_rank = n % self.world_size + rrefs = [] + expected = [] + for i in range(m): + n = n + i + rrefs.append( + rpc.remote( + worker_name(dst_rank), + fn, + args=args_fn(n, sparse), + kwargs=kwargs_fn(n, sparse), + ) + ) + expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse))) + + for i in range(m): + self.assertEqual(rrefs[i].to_here(), expected[i]) + + def _py_rref_args(self, a, b, x, y, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), torch.add, args=(a, b) + ) + rref_b = rpc.remote( + worker_name(dst_rank), torch.add, args=(x, y) + ) + rref_c = rpc.remote( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), expected) + + def _py_rref_args_user_share(self, a, b, c, x, y, z, expected): + n = self.rank + 1 + owner_rank = n % self.world_size + user_rank = (n + 1) % self.world_size + rref_a = rpc.remote( + worker_name(owner_rank), my_function, args=(a, b, c) + ) + rref_b = rpc.remote( + worker_name(owner_rank), my_function, args=(x, y, z) + ) + rref_c = rpc.remote( + worker_name(user_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), expected) + + def _py_rpc_rref_args(self, a, b, c, x, y, z, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), my_function, args=(a, b, c) + ) + rref_b = rpc.remote( + worker_name(dst_rank), my_function, args=(x, y, z) + ) + + c = rpc.rpc_sync( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(c, expected) + + def _nested_remote(self, f, expected): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + + rref = rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + self.assertEqual(rref.to_here(), expected) + + def _nested_rref(self, f, expected1, expected2): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + rref_of_rrefs = rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + + # Say C has 2 OwnerRRefs. + # B has 2 UserRRefs to those 2 OwnerRRefs, respectively. + # This call is effectively A asking B to share its 2 UserRRefs. 
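+        # Once those UserRRefs arrive here (on A), to_here() on each of them fetches the
+        # value directly from its owner C rather than going through B.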
+ rrefs = rref_of_rrefs.to_here() + + self.assertEqual(len(rrefs), 2) + self.assertEqual(rrefs[0].to_here(), expected1) + self.assertEqual(rrefs[1].to_here(), expected2) + + def _nested_rref_stress(self, f, expected1, expected2): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + all_rrefs = [] + for _ in range(20): + all_rrefs.append( + rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + ) + + for i in range(20): + rref_of_rrefs = all_rrefs[i] + rrefs = rref_of_rrefs.to_here() + self.assertEqual(len(rrefs), 2) + self.assertEqual(rrefs[0].to_here(), expected1) + self.assertEqual(rrefs[1].to_here(), expected2) + + def _trainer_func(self, rref, sparse): + m = MyEmbeddingBagModel(sparse=sparse) + loss_fn = nn.MSELoss() + for i in range(10): + outputs = m(torch.rand(10, 10).long()) + loss_fn(outputs, torch.rand(10, 10)).backward() + gradient = next(iter(m.parameters())).grad + fut = rref.rpc_async().average(rref, i, gradient) + gradient = fut.wait() + if gradient.is_sparse: + gradient = gradient.to_dense().double() + ps_gradient = rref.rpc_sync().get_gradient(rref) + if ps_gradient.is_sparse: + ps_gradient = ps_gradient.to_dense().double() + self.assertTrue(torch.equal(gradient, ps_gradient)) + + def _my_parameter_server(self, sparse): + ps_rref = RRef(MyParameterServer(self.world_size - 1)) + futures = [] + for index in range(1, self.world_size): + futures.append( + rpc.rpc_async( + worker_name((self.rank + index) % self.world_size), + self._trainer_func, + args=( + ps_rref, + sparse + ), + ) + ) + torch.futures.wait_all(futures) + + def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor): + # We check proper CUDA stream synchronization by adding to the tensor + # in one stream to get the expected value, and reading it from another stream. 
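+        # A Future constructed with devices=["cuda:0"] records CUDA events when its result is
+        # set and makes the consuming stream wait on them, so the read performed in
+        # `another_stream` below observes the completed addition despite the injected sleep.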
+ future = Future(devices=["cuda:0"]) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + another_stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + if sparse_tensor: + tensor = build_sparse_tensor().to("cuda:0") + add_tensor = build_sparse_tensor().to("cuda:0") + expected_tensor = (tensor + add_tensor).coalesce() + else: + tensor = torch.zeros((100,), device="cuda:0") + add_tensor = torch.ones((100,), device="cuda:0") + expected_tensor = tensor + add_tensor + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor += add_tensor + if sparse_tensor: + tensor = tensor.coalesce() + future.set_result(wrapper(tensor)) + with torch.cuda.stream(another_stream): + tensor = unwrapper(future.wait()) + if sparse_tensor: + self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item()) + self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item()) + self.assertEqual(tensor.size(), expected_tensor.size()) + else: + self.assertTrue(torch.eq(tensor, expected_tensor).all().item()) + + +class RpcTest(RpcAgentTestFixture, RpcTestCommon): + @dist_init + def test_worker_id(self): + n = self.rank + 1 + peer_rank = n % self.world_size + self_worker_info = rpc.get_worker_info() + peer_worker_info = rpc.get_worker_info(worker_name(peer_rank)) + + self.assertEqual(self_worker_info.name, worker_name(self.rank)) + self.assertEqual(peer_worker_info.name, worker_name(peer_rank)) + + with self.assertRaisesRegex(RuntimeError, "could not find destination"): + unknown_worker_id = rpc.get_worker_info("WorkerUnknown") + + @dist_init + def test_get_worker_infos(self): + worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos() + + worker_names = {worker_info.name for worker_info in worker_infos} + expected_worker_names = { + worker_name(rank) for rank in range(self.world_size) + } + self.assertEqual(worker_names, expected_worker_names) + + worker_ids = {worker_info.id for worker_info in worker_infos} + expected_worker_ids = set(range(self.world_size)) + self.assertEqual(worker_ids, expected_worker_ids) + + @dist_init + def test_self_add(self): + self_worker_info = rpc.get_worker_info() + self_worker_name = worker_name(self.rank) + fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1)) + ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(fut.wait(), torch.ones(2, 2) + 1) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + @dist_init + def test_send_to_rank(self): + dst_rank = (self.rank + 1) % self.world_size + + # Test dense tensor + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test invalid ranks + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(RuntimeError): + self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(RuntimeError): + self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(ValueError): + self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with 
self.assertRaises(ValueError): + self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + @dist_init + def test_self_py_udf_remote(self): + self._self_py_udf_remote( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_rpc_arg(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_rpc_arg( + dst, + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_self_rpc_arg(self): + self._self_remote_rref_as_rpc_arg( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_remote_arg(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_remote_arg( + dst, + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_self_remote_arg(self): + self._self_remote_rref_as_remote_arg( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_rref_proxy_non_exist(self): + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3)) + msg = "has no attribute \'non_exist\'" + with self.assertRaisesRegex(AttributeError, msg): + rref.rpc_sync().non_exist() + + with self.assertRaisesRegex(AttributeError, msg): + rref.rpc_async().non_exist().wait() + + with self.assertRaisesRegex(AttributeError, msg): + rref.remote().non_exist() + + def _test_rref_proxy_tensor(self, dst): + rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3)) + + expected = torch.ones(2, 2) + 1 + 3 + self.assertEqual(expected.size(), rref.rpc_sync().size()) + self.assertEqual(expected + 1, rref.rpc_async().add(1).wait()) + self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here()) + + @dist_init + def test_rref_proxy_tensor(self): + self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size)) + + @dist_init + def test_rref_proxy_tensor_self(self): + self._test_rref_proxy_tensor(rpc.get_worker_info()) + + @dist_init + def test_rref_proxy_reuse(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + my_function, + args=(torch.ones(2, 2), 1, 3) + ) + expected = torch.ones(2, 2) + 1 + 3 + + proxy_rpc_sync = rref.rpc_sync() + proxy_rpc_async = rref.rpc_async() + proxy_remote = rref.remote() + + self.assertEqual(expected.size(), proxy_rpc_sync.size()) + self.assertEqual(expected + 1, proxy_rpc_sync.add(1)) + self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4)) + + self.assertEqual(expected.size(), proxy_rpc_async.size().wait()) + self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait()) + self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait()) + + self.assertEqual(expected.size(), proxy_remote.size().to_here()) + self.assertEqual(expected + 5, proxy_remote.add(5).to_here()) + self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here()) + + def _test_rref_proxy_class(self, dst): + rref = rpc.remote(dst, MyClass, args=(7,)) + expected = MyClass(7) + self.assertEqual(expected.get_value(), rref.rpc_sync().get_value()) + self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait()) + self.assertEqual(expected.get_value(), rref.remote().get_value().to_here()) + + expected.increment_value(3) + self.assertEqual(None, rref.rpc_sync().increment_value(1)) + self.assertEqual(None, rref.rpc_async().increment_value(1).wait()) + self.assertEqual(None, rref.remote().increment_value(1).to_here()) + + 
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value()) + self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait()) + self.assertEqual(expected.get_value(), rref.remote().get_value().to_here()) + + self.assertEqual( + expected.my_instance_method(2), + rref.rpc_sync().my_instance_method(2) + ) + self.assertEqual( + expected.my_instance_method(3), + rref.rpc_async().my_instance_method(3).wait() + ) + self.assertEqual( + expected.my_instance_method(4), + rref.remote().my_instance_method(4).to_here() + ) + + self.assertEqual( + expected.my_static_method(9), + rref.rpc_sync().my_static_method(9) + ) + self.assertEqual( + expected.my_static_method(10), + rref.rpc_async().my_static_method(10).wait() + ) + self.assertEqual( + expected.my_static_method(11), + rref.remote().my_static_method(11).to_here() + ) + + self.assertEqual( + expected.my_class_method(2, torch.zeros(2, 2)), + rref.rpc_sync().my_class_method(2, torch.zeros(2, 2)) + ) + self.assertEqual( + expected.my_class_method(2, torch.ones(3, 3)), + rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait() + ) + self.assertEqual( + expected.my_class_method(2, torch.ones(4, 4)), + rref.remote().my_class_method(2, torch.ones(4, 4)).to_here() + ) + + @dist_init + def test_rref_proxy_class(self): + self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size)) + + @dist_init + def test_rref_proxy_class_self(self): + self._test_rref_proxy_class(rpc.get_worker_info()) + + @mock.patch.object(torch.distributed.autograd, "_init") + @mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent") + @dist_init(setup_rpc=False) + def test_register_rpc_backend_and_set_and_start_rpc_backend( + self, mock_rpc_agent, mock_dist_autograd_init + ): + backend_name = "stub_backend" + + backend = rpc.backend_registry.register_backend( + backend_name, + _stub_construct_rpc_backend_options_handler, + _stub_init_rpc_backend_handler, + ) + + with self.assertRaisesRegex( + RuntimeError, "^RPC backend .+: already registered$" + ): + backend = rpc.backend_registry.register_backend( + backend_name, + _stub_construct_rpc_backend_options_handler, + _stub_init_rpc_backend_handler, + ) + + rpc.init_rpc( + name="worker1", + backend=backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_duplicate_name(self): + with self.assertRaisesRegex(RuntimeError, "is not unique"): + store, _, _ = next( + torch.distributed.rendezvous( + self.init_method, rank=self.rank, world_size=self.world_size + ) + ) + rpc._init_rpc_backend( + backend=self.rpc_backend, + store=store, + name="duplicate_name", + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_duplicate_name_2(self): + with self.assertRaisesRegex(RuntimeError, "is not unique"): + rpc.init_rpc( + name=worker_name(self.rank % (self.world_size - 1)), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_reinit(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + initialize_pg(self.file_init_method, self.rank, self.world_size) + # Wait for all init to complete. 
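+ # The ProcessGroup barrier ensures every rank has finished its first
+ # init_rpc before any rank attempts the second, intentionally failing
+ # init_rpc below.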
+ dist.barrier() + + # TODO: with TCP init, rank 0 raises Address already in use because + # rank 0 is the start daemon and the store is created before checking if + # RPC is already initialized in init_rpc. + if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0: + expected_reinit_err = "Address already in use" + else: + expected_reinit_err = "is already initialized" + + with self.assertRaisesRegex(RuntimeError, expected_reinit_err): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + @dist_init(setup_rpc=False) + def test_pg_init_no_rpc_init(self): + dist.init_process_group( + backend='gloo', + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size) + + class MyModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.lin = torch.nn.Linear(3, 4) + + def forward(self, x): + return self.lin(x) + + model = MyModel() + model.train() + model = torch.nn.parallel.DistributedDataParallel(model) + + with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'): + params = [] + for param in model.parameters(): + params.append(RRef(param)) + + def test_world_size_one(self): + self._world_size_one( + torch.ones(2, 2), + torch.ones(2, 2) + ) + + @dist_init(setup_rpc=False) + def test_invalid_names(self): + + worker_id = 0 + with self.assertRaisesRegex(RuntimeError, "Worker name must match"): + info = WorkerInfo("abc*", worker_id) + + with self.assertRaisesRegex(RuntimeError, "Worker name must match"): + info = WorkerInfo(" ", worker_id) + + with self.assertRaisesRegex(RuntimeError, "must be non-empty"): + info = WorkerInfo("", worker_id) + + # If the number in the message does not match, it is likely that the + # value of MAX_NAME_LEN in RPC WorkerInfo has changed. 
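+ # The 500-character name below is well over the current 128-character
+ # limit, so constructing the WorkerInfo is expected to fail.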
+ with self.assertRaisesRegex(RuntimeError, "shorter than 128"): + info = WorkerInfo("".join(["a" for i in range(500)]), worker_id) + + # Test that WorkerInfo can be pickled and sent in RPC call + @dist_init + def test_worker_info_pickle(self): + dst_rank = (self.rank + 1) % self.world_size + worker_info = rpc.api.get_worker_info() + ret = rpc.rpc_sync(worker_name(dst_rank), identity, args=(worker_info,)) + self.assertEqual(ret, worker_info) + + @dist_init + def test_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + + @staticmethod + def return_callee_id(): + return rpc.get_worker_info().id + + @dist_init + def test_int_callee(self): + dst_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id) + self.assertEqual(ret, dst_rank) + + @dist_init + def test_add_with_id(self): + n = self.rank + 1 + dst_rank = n % self.world_size + workder_info = rpc.get_worker_info(worker_name(dst_rank)) + + ret = rpc.rpc_sync( + workder_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n)) + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + + @dist_init + def test_scalar_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n) + ) + self.assertEqual(ret, (torch.ones(n, n) + n)) + + @dist_init + def test_async_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_nonzero(self): + n = self.rank + 1 + dst_rank = n % self.world_size + x = torch.ones(self.world_size, self.world_size) + x[self.rank][self.rank] = 0 + ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,)) + self.assertEqual(ret, x.nonzero()) + + @dist_init + def test_multi_rpc(self): + self._multi_rpc(False) + + @dist_init + def test_future_wait_twice(self): + dst = worker_name((self.rank + 1) % self.world_size) + futs = [] + for i in range(20): + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + torch.futures.wait_all(futs) + + for fut in futs: + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + + @dist_init(setup_rpc=False) + def test_wait_all_workers_timeout(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + og_func = rpc.api._wait_all_workers + + def wait_all_workers_sleep(timeout): + rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout) + + rpc.api._wait_all_workers = wait_all_workers_sleep + + try: + with self.assertRaisesRegex(RuntimeError, ''): + rpc.shutdown(graceful=True, timeout=0.01) + finally: + rpc.api._wait_all_workers = og_func + dist.barrier() + + def test_wait_all_workers_dense(self): + self._wait_all_workers(heavy_rpc, torch.ones(100, 100)) + + def test_wait_all_workers_twice_dense(self): + self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100)) + + @dist_init + def test_all_gather(self): + info = rpc.get_worker_info() + results = rpc.api._all_gather(info.id) + expected = {} + for info in 
rpc._get_current_rpc_agent().get_worker_infos(): + expected[info.name] = info.id + + self.assertEqual(expected, results) + + @dist_init + def test_all_gather_timeout(self): + rpc._set_rpc_timeout(0.1) + + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, + "timed out in _all_gather after 0\\.10 seconds" + ): + rpc.api._all_gather(SlowPickleClass(0.5)) + else: + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.api._all_gather(SlowPickleClass(0.5)) + + def _test_barrier_helper(self, info, names, multi_threaded=False): + names = sorted(names) + leader = names[0] + rpc.rpc_sync(leader, _reset_count) + if not multi_threaded and info.name == leader: + self.assertEqual(_rpc_barrier_count, 0) + rpc.api._barrier(names) + rpc.rpc_sync(leader, _increment_count) + rpc.api._barrier(names) + if not multi_threaded and info.name == leader: + self.assertEqual(_rpc_barrier_count, len(names)) + + @dist_init + def test_rpc_barrier_all(self): + # Test rpc barrier when called with full list of workers + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + names = [worker.name for worker in all_worker_info] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_subset(self): + # Test rpc barrier when processes are called with different subsets of the full list + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + if info.id % 2: + names = [worker.name for worker in all_worker_info if worker.id % 2] + else: + names = [worker.name for worker in all_worker_info if not worker.id % 2] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_partial_subset(self): + # Test rpc barrier when some processes are not involved in the barrier + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + if info.id % 2: + names = [worker.name for worker in all_worker_info if worker.id % 2] + else: + names = [f"worker{info.id}"] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_multithreaded(self): + # This tests validates the implementation of barrier when multiple threads call into it + # We only need to check that it does not hang in this case + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + names = [worker.name for worker in all_worker_info] + threads = [] + for _ in range(3): + th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True)) + threads.append(th) + th.start() + for th in threads: + th.join() + + @dist_init + def test_graceful_shutdown_with_uneven_workload(self): + """Test graceful termination.""" + self._run_uneven_workload(heavy_rpc, torch.ones(100, 100)) + + @dist_init(setup_rpc=False) + def test_shutdown_followed_by_rpc(self): + # Initialize RPC. 
+ rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + rpc.shutdown() + + with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"): + rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + + @dist_init + def test_expected_src(self): + dst_rank = (self.rank + 1) % self.world_size + expected_src_rank = (self.rank - 1) % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,)) + value = VALUE_FUTURE.result() + self.assertEqual(value, expected_src_rank) + + @dist_init + def test_py_built_in(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2)) + self.assertEqual(ret, min(n, n + 1, n + 2)) + + @dist_init + def test_py_user_defined(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_function, + kwargs={"a": n, "b": n + 1, "c": n + 2}, + ) + self.assertEqual(ret, my_function(n, n + 1, n + 2)) + + def test_build_rpc_profiling_key(self): + # Tests that the name that shows up as an Event in profiling RPCs has all + # the necessary information. + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + rpc_profiling_key = _build_rpc_profiling_key( + exec_mode, "foo", "worker0", "worker1" + ) + self.assertIn(exec_mode.value, rpc_profiling_key) + self.assertIn("foo", rpc_profiling_key) + self.assertIn("worker0", rpc_profiling_key) + self.assertIn("worker1", rpc_profiling_key) + + def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode): + self.assertTrue(self_worker_name in rpc_event.name) + self.assertTrue(dst_worker_name in rpc_event.name) + if isinstance(func, torch.jit.ScriptFunction): + self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name) + else: + self.assertTrue(func.__name__ in rpc_event.name) + self.assertTrue(rpc_exec_mode.value in rpc_event.name) + self.assertEqual(rpc_event.count, 1) + + @dist_init + def test_profiler_rpc_record_shapes(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + t1, t2 = torch.ones(100), torch.ones(100) + with _profile(record_shapes=True) as prof: + rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2)) + + function_events = prof.function_events + remote_events = [event for event in function_events if event.is_remote] + remote_add_event = next( + event for event in remote_events if "aten::add" in event.name + ) + remote_add_input_shapes = remote_add_event.input_shapes + # Run profiler on equivalent local op and validate shapes are the same. 
+ with _profile(record_shapes=True) as prof: + torch.add(t1, t2) + + local_function_events = prof.function_events + local_add_event = next( + event for event in local_function_events if "aten::add" in event.name + ) + local_add_input_shapes = local_add_event.input_shapes + self.assertEqual(remote_add_input_shapes, local_add_input_shapes) + + @dist_init + def test_profiler_rpc_memory(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + with _profile(profile_memory=True) as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + function_events = p.function_events + event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events} + # if cpu_memory_usage was not propagated over the wire, this set would + # only contain 0 (indicates no memory being profiled) + self.assertNotEqual({0}, event_cpu_mem_usages) + # No memory profiled if profile_memory=False + with _profile(profile_memory=False) as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + function_events = p.function_events + event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events} + self.assertEqual({0}, event_cpu_mem_usages) + + @dist_init + def test_profiler_export_trace(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + with _profile() as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + events = p.function_events + with TemporaryFileName() as fname: + path = fname + p.export_chrome_trace(path) + with open(path) as f: + trace = json.load(f) + event_names = [event['name'] for event in trace] + for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]: + event_exists = any(expected_event_name in event_name for event_name in event_names) + self.assertTrue(event_exists) + + @dist_init + def test_profiler_rpc_key_names(self): + # tests that remote events are properly prefixed with the RPC profiling key. + if self.rank != 1: + return + + # Spawn multiple threads that send RPCs to ensure keys are correctly + # prefixed when there are multiple RPCs being created/in flight at the + # same time. + dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank] + + def rpc_with_profiling(dst_worker): + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + fut.wait() + + events = prof.function_events + remote_event_names = { + event.name: event for event in events if event.is_remote + } + rpc_profiling_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + udf_with_torch_ops.__qualname__, + worker_name(self.rank), + dst_worker, + ) + + remote_event_name_set = set(EXPECTED_REMOTE_EVENTS) + for name, event in remote_event_names.items(): + # Ensure that we have the expected key as part of the remote + # event. + self.assertTrue(name.startswith(rpc_profiling_key)) + self.assertTrue(event.is_remote) + self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id) + # Ensure that the remote event name also contains the operator. + operator_name_substr = name[len(rpc_profiling_key) :] + # Note: we don't assert that every remote event needs to be + # in the above set, the set is just a representative set of + # what we expect to see. The profiler can change and add more + # events, but we should always expect to see this representative + # set. 
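+ # Bookkeeping: every expected operator that shows up in some remote event
+ # is removed from remote_event_name_set, and the set must be empty at the
+ # end, i.e. each representative event was observed at least once.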
+ matching_event = { + remote_event_name + for remote_event_name in remote_event_name_set + if remote_event_name in operator_name_substr + } + remote_event_name_set -= matching_event + + # The set should be empty, otherwise its contained elements did + # not show up in the remote profiler output. + self.assertTrue( + remote_event_name_set == set(), + f"Expected {remote_event_name_set} to be included in remote profiler output.", + ) + + for dst in dst_ranks: + dst_worker = worker_name(dst) + num_parallel_rpcs = 2 + with concurrent.futures.ThreadPoolExecutor( + max_workers=num_parallel_rpcs + ) as executor: + futs = [ + executor.submit(rpc_with_profiling, dst_worker) + for _ in range(num_parallel_rpcs) + ] + # Wait for workers to finish test + for fut in futs: + fut.result() + + def _run_test_profiler_remote_events_profiled(self): + # Tests that we can successfully invoke the profiler on a remote node, + # and collect the remote events back in the local profiler. + if self.rank != 1: + return + + dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank] + for dst in dst_ranks: + dst_worker = worker_name(dst) + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + ret = fut.wait() + + events = prof.function_events + + rpc_event = get_function_event(events, RPCExecMode.ASYNC.value) + self.check_profiling_info( + worker_name(self.rank), + dst_worker, + udf_with_torch_ops, + rpc_event, + RPCExecMode.ASYNC, + ) + + remote_events = {event.name: event for event in events if event.is_remote} + rpc_profiling_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + udf_with_torch_ops.__qualname__, + worker_name(self.rank), + worker_name(dst), + ) + + for expected_remote_event_name in EXPECTED_REMOTE_EVENTS: + expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name + self.assertTrue(expected_key in remote_events) + remote_event = remote_events[expected_key] + # Remote event should have a node ID corresponding to the worker + # it ran on. + self.assertEqual(remote_event.node_id, dst) + + # Validate order remote events show up in profiling output. 
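+ # convert_remote_to_local strips the RPC profiling key prefix and the
+ # remote-op marker (REMOTE_OP_STR) from an event name, leaving only the
+ # operator name so it can be compared against EXPECTED_REMOTE_EVENTS.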
+ def convert_remote_to_local(event_name): + remote_op_key = rpc_profiling_key + REMOTE_OP_STR + return event_name[ + event_name.find(remote_op_key) + + len(remote_op_key) : + ] + + remote_events_list = [ + convert_remote_to_local(event.name) + for event in events + if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS + ] + self.assertEqual( + set(remote_events_list), + set(EXPECTED_REMOTE_EVENTS), + f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}", + ) + + @dist_init + def test_profiler_remote_events_profiled(self): + self._run_test_profiler_remote_events_profiled() + + @dist_init + def test_profiler_remote_events_profiled_single_threaded(self): + self._run_test_profiler_remote_events_profiled() + + def run_profiling_workload(self, dst): + fut = rpc.rpc_async( + worker_name(dst), + torch.mul, + args=( + torch.tensor(1.0, requires_grad=True), + torch.tensor(1.0, requires_grad=True), + ), + ) + fut.wait() + + def _run_rpc_profiling_async_function(self, device="cpu"): + if self.rank != 1: + return + + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + x = torch.ones(2) + y = torch.ones(2) + with _profile() as prof: + ret = rpc.rpc_async( + dst1, slow_async_add, args=(dst2, x, y, device), timeout=20 + ) + out = ret.wait() + + function_events = prof.function_events + # slow_async_add resulted in an RPC from dst1 -> dst2, so this should be + # recorded. + key_prefix = _build_rpc_profiling_key( + RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1 + ) + + nested_rpc_key_prefix = _build_rpc_profiling_key( + RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2 + ) + expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix + remote_events = [event for event in function_events if event.is_remote] + rpc_remote_event = [ + event for event in remote_events if event.name == expected_key + ] + self.assertEqual(1, len(rpc_remote_event)) + rpc_remote_event = rpc_remote_event[0] + self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size) + # slow_async_add's RPC does an add on dst2, which should be reflected as well. + remote_add_key = ( + expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add) + ) + remote_add_event = [ + event for event in remote_events if event.name == remote_add_key + ] + self.assertEqual(1, len(remote_add_event)) + remote_add_event = remote_add_event[0] + # Validate that node_id is dst2. + self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size) + + @dist_init + def test_rpc_profiling_async_function(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + self._run_rpc_profiling_async_function() + if torch.cuda.is_available(): + dist.barrier() + self._run_rpc_profiling_async_function(device="cuda:0") + + @dist_init + def test_rpc_profiling_async_function_single_threaded(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + self._run_rpc_profiling_async_function() + if torch.cuda.is_available(): + dist.barrier() + self._run_rpc_profiling_async_function(device="cuda:0") + + @dist_init + def test_rpc_profiling_remote_record_function(self): + # test that functions run over RPC with record_function show the expected + # profiled block. 
+ if self.rank != 1: + return + dst_ranks = [i for i in range(self.world_size) if i != self.rank] + for dst_rank in dst_ranks: + dst_worker = worker_name(dst_rank) + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True)) + fut.wait() + + function_events = prof.function_events + record_function_remote_event = [ + evt for evt in function_events if "##forward##" in evt.name + ] + self.assertEqual(1, len(record_function_remote_event)) + record_function_remote_event = record_function_remote_event[0] + self.assertEqual(record_function_remote_event.node_id, dst_rank) + # cpu_children only returns direct children, so here we get all + # children recursively. + + def get_cpu_children(event): + if not event.cpu_children: + return [] + cpu_children = event.cpu_children + for e in event.cpu_children: + cpu_children.extend(get_cpu_children(e)) + return cpu_children + + remote_children = get_cpu_children(record_function_remote_event) + # Get local children and verify parity. + with _profile() as prof: + udf_with_torch_ops(-1, True) + + local_function_events = prof.function_events + local_record_function_event = next( + evt for evt in local_function_events if "##forward##" in evt.name + ) + local_children = get_cpu_children(local_record_function_event) + local_children_names = [ + evt.name for evt in local_children + ] + + REMOTE_OP_STR = "#remote_op: " + + def convert_remote_to_local(event_name): + remote_op_key = REMOTE_OP_STR + return event_name[ + event_name.find(remote_op_key) + len(remote_op_key) : + ] + + for evt in remote_children: + local_name = convert_remote_to_local(evt.name) + self.assertTrue(local_name in local_children_names) + + def validate_profiling_workload(self, dst, prof): + + def convert_remote_to_local(event_name): + return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :] + + events = prof.function_events + remote_events = { + convert_remote_to_local(event.name): event + for event in events + if event.is_remote + } + self.assertTrue("aten::mul" in remote_events) + remote_mul_event = remote_events["aten::mul"] + self.assertEqual(remote_mul_event.node_id, dst) + self.check_profiling_info( + worker_name(self.rank), + worker_name(dst), + torch.mul, + remote_mul_event, + RPCExecMode.ASYNC, + ) + + def _run_test_profiler_with_autograd_context(self): + dst = (self.rank + 1) % self.world_size + if self.rank == 1: + # Cases where we can double wrap messages with profiling information and autograd info. + with dist_autograd.context() as context_id: + with _profile() as prof: + self.run_profiling_workload(dst) + + self.validate_profiling_workload(dst, prof) + + # Ensure that flipped order of ctx managers results in events being + # recorded as expected. + with _profile() as prof: + with dist_autograd.context() as context_id: + self.run_profiling_workload(dst) + + self.validate_profiling_workload(dst, prof) + + @dist_init + def test_profiler_with_autograd_context_single_threaded(self): + self._run_test_profiler_with_autograd_context() + + @dist_init + def test_profiler_with_autograd_context(self): + self._run_test_profiler_with_autograd_context() + + def _profiler_test_with_rpc( + self, rpc_exec_mode, func, args, use_record_function=False, dst=None, kineto_profile=False + ): + dst = dst if dst is not None else (self.rank + 1) % self.world_size + + # only run profiler on rank 1. 
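+ # The legacy autograd profiler (_profile) supports RPC profiling, while the
+ # kineto-based torch.profiler.profile path is exercised only to verify that
+ # RPC profiling stays disabled there (see the IndexError assertion below).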
+ p = _profile if not kineto_profile else torch.profiler.profile # kineto + if self.rank == 1: + with p() as prof: + record_function_ctx_mgr = ( + contextlib.nullcontext() + if not use_record_function + else torch.autograd.profiler.record_function( + "foo" + ) + ) + with record_function_ctx_mgr as rf: + if rpc_exec_mode == RPCExecMode.SYNC: + rpc.rpc_sync(worker_name(dst), func, args=args) + elif rpc_exec_mode == RPCExecMode.ASYNC: + fut = rpc.rpc_async(worker_name(dst), func, args=args) + if kineto_profile: + # Ensure multiple async RPCs don't cause issues. + # Would have raised + # "RuntimeError: Cannot call + # RemoteProfilerManager::setCurrentKey when current + # key is already set." error if RPC profiling was + # not disabled properly for kineto. + fut2 = rpc.rpc_async(worker_name(dst), func, args=args) + fut2.wait() + fut.wait() + else: + self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE) + rref = rpc.remote(worker_name(dst), func, args=args) + rref.to_here() + # To avoid flakiness, wait for the RRef to be profiled. This + # means that we received the acknowledgement of successful + # creation on the owner and ran the callbacks responsible + # for recording the profiling event. + rref._get_profiling_future().wait() + + events = prof.function_events if not kineto_profile else prof.events() + if kineto_profile: + # RPC profiling is disabled so there should be no rpc related + # events. + with self.assertRaises(IndexError): + get_function_event(events, rpc_exec_mode.value) + + return + + rpc_event = get_function_event(events, rpc_exec_mode.value) + # verify Node ID for this rpc event. + self.assertEqual(rpc_event.node_id, self.rank) + # Ensure recording of remote events. + remote_events = {event for event in events if event.node_id == dst} - {rpc_event} + self.assertGreaterEqual(len(remote_events), 1) + for remote_event in remote_events: + self.assertEqual(remote_event.node_id, dst) + + if use_record_function: + scope_event = get_function_event(events, "foo") + # Since RPC call is within the scope, its CPU interval should be + # contained within foo's interval. + self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start) + self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end) + # the sender, dest worker, function run, and type of RPC should all + # be recorded. + self_worker_name = worker_name(self.rank) + dst_worker_name = worker_name(dst) + self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode) + if use_record_function: + # verify order by ensuring that the outer context comes + # before the rpc event. 
+ foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name) + rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name) + self.assertLess(foo_event_ix, rpc_event_idx) + + def _run_test_profiler_with_sync_rpc_udf(self): + self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,), + use_record_function=True) + + @dist_init + def test_profiler_with_sync_rpc_udf(self): + self._run_test_profiler_with_sync_rpc_udf() + + @dist_init + def test_profiler_with_sync_rpc_udf_single_threaded(self): + self._run_test_profiler_with_sync_rpc_udf() + + def _run_test_profiler_with_sync_rpc_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + + @dist_init + def test_profiler_with_sync_rpc_builtin(self): + self._run_test_profiler_with_sync_rpc_builtin() + + @dist_init + def test_profiler_with_sync_rpc_builtin_single_threaded(self): + self._run_test_profiler_with_sync_rpc_builtin() + + def _run_test_profiler_with_async_rpc_udf(self): + self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,), + use_record_function=True) + # Test to ensure that kineto profiler enabled in RPC does not enable + # RPC profiling (it is unsupported) and does not result in issues. + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, my_sleep_func, args=(1,), kineto_profile=True + ) + + @dist_init + def test_profiler_with_async_rpc_udf(self): + self._run_test_profiler_with_async_rpc_udf() + + @dist_init + def test_profiler_with_async_rpc_udf_single_threaded(self): + self._run_test_profiler_with_async_rpc_udf() + + def _run_test_profiler_with_async_rpc_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + + @dist_init + def test_profiler_with_async_rpc_builtin(self): + self._run_test_profiler_with_async_rpc_builtin() + + @dist_init + def test_profiler_with_async_rpc_builtin_single_threaded(self): + self._run_test_profiler_with_async_rpc_builtin() + + def _run_test_profiler_with_remote_udf(self): + self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank + ) + + @dist_init + def test_profiler_with_remote_udf(self): + self._run_test_profiler_with_remote_udf() + + @dist_init + def test_profiler_with_remote_udf_single_threaded(self): + self._run_test_profiler_with_remote_udf() + + def _run_test_profiler_with_remote_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, + torch.mul, + args=(torch.ones(1), torch.ones(1)), + dst=self.rank, + ) + + @dist_init + def test_profiler_with_remote_builtin(self): + 
self._run_test_profiler_with_remote_builtin() + + @dist_init + def test_profiler_with_remote_builtin_single_threaded(self): + self._run_test_profiler_with_remote_builtin() + + def _run_test_profiler_with_script_async_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + + @dist_init + def test_profiler_with_script_async_rpc(self): + self._run_test_profiler_with_script_async_rpc() + + @dist_init + def test_profiler_with_script_async_rpc_single_threaded(self): + self._run_test_profiler_with_script_async_rpc() + + def _run_test_profiler_with_script_sync_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.SYNC, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + + @dist_init + def test_profiler_with_script_sync_rpc(self): + self._run_test_profiler_with_script_sync_rpc() + + @dist_init + def test_profiler_with_script_sync_rpc_single_threaded(self): + self._run_test_profiler_with_script_sync_rpc() + + def _run_test_profiler_with_script_remote_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank + ) + + @dist_init + def test_profiler_with_script_remote_rpc(self): + self._run_test_profiler_with_script_remote_rpc() + + @dist_init + def test_profiler_with_script_remote_rpc_single_threaded(self): + self._run_test_profiler_with_script_remote_rpc() + + def _assert_top_level_events(self, process_global_events, expected_top_level_event_names): + top_level_event_names = [] + for thread_local_events in process_global_events: + # Get top-level events from all events happened on a thread. 
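+ # An event counts as top level if it starts after the previous top-level
+ # event ended; events nested inside a parent's time range are skipped
+ # (this assumes the per-thread events are ordered by start time).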
+ last_end_time = 0 + for event in thread_local_events: + event_name = event.name + time_range = event.time_range + if time_range.start > last_end_time: + top_level_event_names.append(event_name) + last_end_time = time_range.end + top_level_event_names = sorted(top_level_event_names) + expected_top_level_event_names = sorted(expected_top_level_event_names) + self.assertEqual( + top_level_event_names, + expected_top_level_event_names, + f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}", + ) + + @dist_init + def test_server_process_global_profiler(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + + x = torch.tensor(1) + y = torch.tensor(2) + + outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + outer_profile_rref.rpc_sync().__enter__() + rpc.rpc_sync(dst_worker_name, torch.add, (x, y)) + inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + inner_profile_rref.rpc_sync().__enter__() + rpc.rpc_sync(dst_worker_name, torch.sub, (x, y)) + inner_profile_rref.rpc_sync().__exit__(None, None, None) + outer_profile_rref.rpc_sync().__exit__(None, None, None) + + inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,)) + expected_inner_events = ['aten::sub'] + expected_outer_events = expected_inner_events + ['aten::add'] + + self._assert_top_level_events(inner_events, expected_inner_events) + outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,)) + self._assert_top_level_events(outer_events, expected_outer_events) + + inner_profile_rref.rpc_sync().key_averages() + outer_profile_rref.rpc_sync().key_averages() + + @dist_init + def test_async_record_function_double_end_callbacks(self): + num_sleep_seconds = 1 + if self.rank == 1: + # Validate that calling the function twice results in an error. + with _profile() as pf: + with torch.autograd.profiler.record_function("foo") as rf: + fut = rpc.rpc_async( + worker_name(0), my_sleep_func, args=(num_sleep_seconds,) + ) + rf._call_end_callbacks_on_future(fut) + with self.assertRaisesRegex( + RuntimeError, "can only be called once." + ): + rf._call_end_callbacks_on_future(fut) + fut.wait() + + @dist_init + def test_async_record_function_legacy(self): + # Test the legacy _record_function ops work + # Note: These exist for backward compatibility with TorchScript + num_sleep_seconds = 1 + if self.rank == 1: + with _profile() as pf: + try: + handle = torch.ops.profiler._record_function_enter("foo", None) + fut = rpc.rpc_async( + worker_name(0), my_sleep_func, args=(num_sleep_seconds,) + ) + torch.ops.profiler._call_end_callbacks_on_jit_fut(handle, fut) + finally: + torch.ops.profiler._record_function_exit(handle) + + fut.wait() + + @dist_init + def test_async_record_function_cbs_jit_call(self): + if self.rank == 1: + with _profile() as pf: + key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + torch._jit_internal._qualified_name(my_script_func), + "worker1", + "worker0", + ) + with torch.autograd.profiler.record_function(key) as rf: + fut = rpc.rpc_async( + worker_name(0), my_script_func, args=(torch.tensor(1),) + ) + # Intentionally calling record_function internals + fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.record, fut) + result = fut.wait() + # Validate that the profiling future returns the same value as the RPC + # future. 
+ expected = torch.add(torch.tensor(1), torch.tensor(1)) + self.assertEqual(result, expected) + events = pf.function_events + rpc_event = get_function_event( + events, torch._jit_internal._qualified_name(my_script_func) + ) + self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name) + + @dist_init + def test_py_class_constructor(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,)) + self.assertEqual(ret.a, n) + + @dist_init + def test_py_class_instance_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,) + ) + self.assertEqual(ret, MyClass(2).my_instance_method(n)) + + @dist_init + def test_py_class_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1) + ) + self.assertEqual(ret, MyClass.my_class_method(n, n + 1)) + + @dist_init + def test_py_class_static_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,) + ) + self.assertEqual(ret, MyClass.my_static_method(n + 10)) + + @dist_init + def test_py_multi_async_call(self): + n = self.rank + 1 + dst_rank = n % self.world_size + dst_worker_info = rpc.get_worker_info(worker_name(dst_rank)) + fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,)) + fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2)) + self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10)) + self.assertEqual(fut2.wait(), min(n, n + 1, n + 2)) + + @dist_init + def test_py_no_return_result(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), no_result) + self.assertEqual(ret, no_result()) + + @dist_init + def test_py_tensors(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_tensor_function, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n))) + + @dist_init + def test_py_tensors_multi_async_call(self): + futs = [] + n = self.rank + 1 + dst_rank = n % self.world_size + for i in range(100): + fut = rpc.rpc_async( + worker_name(dst_rank), + my_tensor_function, + args=(torch.ones(i, i), torch.ones(i, i)), + ) + futs.append(fut) + + j = 0 + for val in torch.futures.wait_all(futs): + self.assertEqual( + val, my_tensor_function(torch.ones(j, j), torch.ones(j, j)) + ) + j += 1 + + @dist_init + def test_py_tensors_in_container(self): + n = self.rank + 1 + dst_rank = n % self.world_size + a = [torch.ones(n, n), torch.ones(n, n)] + b = TensorClass(build_complex_tensors()) + c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)} + ret = rpc.rpc_sync( + worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c) + ) + self.assertEqual(ret, my_complex_tensor_function(a, b, c)) + + @dist_init + def test_py_nested_pickle(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + ret = rpc.rpc_sync( + worker_name(dst_rank), + run_nested_pickle, + args=(MyPickleClass(), torch.ones(2, 2)), + ) + + m = MyPickleClass() + m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2))) + self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2))) + + @dist_init + def test_py_function_exception(self): + n = self.rank + 1 + dst_rank = n % self.world_size + with 
self.assertRaises(TypeError): + ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,)) + + @dist_init + def test_py_raise_in_user_func(self): + with captured_output() as (_, err): + # This barrier prevents a race condition where the main thread has + # not entered the context manager when the remote function runs. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async(worker_name(dst_rank), raise_func) + with self.assertRaisesRegex(ValueError, expected_err): + fut.wait() + # This barrier prevents a race condition where the main thread exits + # context manager before the remote function has ran. + dist.barrier() + + # Validate that trainers log errors when running functions. + stderr_lines = err.getvalue() + self.assertTrue(expected_err in stderr_lines) + + @dist_init + def test_py_raise_in_user_func_escaped_str(self): + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape) + try: + fut.wait() + except ValueError as e: + msg = str(e) + # Ensure newlines are unescaped to provide a better repr of error. + self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape")) + else: + self.assertTrue(False, "expected raise_func_escape to raise ValueError.") + + @dist_init + def test_nested_rpc(self): + self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1) + + @dist_init + def test_stress_light_rpc(self): + self._stress_test_rpc(light_rpc) + + @dist_init + def test_stress_heavy_rpc(self): + self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),)) + + @dist_init + def test_stress_heavy_rpc_torchscript(self): + self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),)) + + @dist_init + def test_builtin_remote_ret(self): + self._builtin_remote_ret( + torch.ones(2, 2), + torch.ones(2, 2), + torch.ones(2, 2) * 2 + ) + + @dist_init + def test_builtin_remote_self(self): + self._builtin_remote_self( + torch.ones(2, 2), + torch.ones(2, 2), + torch.ones(2, 2) * 2 + ) + + @staticmethod + def _multi_args_fn(n, sparse=False): + if sparse: + return (build_sparse_tensor(), build_sparse_tensor()) + else: + return (torch.ones(n, n), torch.ones(n, n)) + + @dist_init + def test_multi_builtin_remote_ret(self): + self._test_multi_remote_call( + torch.add, False, + args_fn=RpcTest._multi_args_fn + ) + + @dist_init + def test_py_udf_remote(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + my_function, + kwargs={"a": n, "b": n + 1, "c": n + 2}, + ) + self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2)) + + @staticmethod + def _multi_kwargs_fn(n, sparse=False): + if sparse: + return { + "a": build_sparse_tensor(), + "b": build_sparse_tensor(), + "c": build_sparse_tensor() + } + else: + return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)} + + @dist_init + def test_multi_py_udf_remote(self): + self._test_multi_remote_call( + my_function, + False, + kwargs_fn=RpcTest._multi_kwargs_fn + ) + + @dist_init + def test_py_rref_args(self): + self._py_rref_args( + torch.ones(2, 2), + 1, + torch.ones(2, 2), + 2, + torch.ones(2, 2) * 2 + 3) + + @dist_init + def test_py_rref_args_user_share(self): + self._py_rref_args_user_share( + torch.ones(2, 2), + 1, + 2, + torch.ones(2, 2), + 3, + 4, + torch.ones(2, 2) * 2 + 10 + ) + + @dist_init + def test_py_rpc_rref_args(self): + self._py_rpc_rref_args( + torch.ones(2, 2), + 1, + 2, + 
torch.ones(2, 2), + 3, + 4, + torch.ones(2, 2) * 2 + 10 + ) + + @dist_init + def test_nested_remote(self): + self._nested_remote( + nested_remote, + torch.ones(2, 2) + 3 + ) + + @dist_init + def test_nested_rref(self): + self._nested_rref( + nested_rref, + torch.ones(2, 2) + 1, + torch.ones(2, 2) + 2 + ) + + @dist_init + def test_nested_rref_stress(self): + self._nested_rref_stress( + nested_rref, + torch.ones(2, 2) + 1, + torch.ones(2, 2) + 2 + ) + + @dist_init + def test_multi_layer_nested_async_rpc(self): + # This test will exit right away, but there will be a chain of async + # RPCs. The termination algorithm should detect those messages properly. + # Otherwise, some peer could exit early, leaving others to timeout + # errors or connection closed errors. + ttl = 20 + n = self.rank + 1 + dst_rank = n % self.world_size + + multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl) + + @dist_init + def test_remote_with_exception(self): + n = self.rank + 1 + dst_rank = n % self.world_size + # check ref to other workers + rref = rpc.remote(worker_name(dst_rank), raise_func) + with self.assertRaises(ValueError): + rref.to_here() + # check ref to itself + rref = rpc.remote(worker_name(self.rank), no_result, args=(10,)) + with self.assertRaises(TypeError): + rref.to_here() + + @dist_init + def test_rpc_return_rref(self): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + rref = rpc.rpc_sync( + worker_name(dst_rank1), + rpc_return_rref, + args=(worker_name(dst_rank2),), + ) + self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1) + + @dist_init + def test_rref_forward_chain(self): + ttl = 8 + n = self.rank + 1 + dst_rank = n % self.world_size + + rref = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1) + ) + + ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl) + + for i in range(ttl): + self.assertEqual(len(ret_rref), 1) + ret_rref = ret_rref[0].to_here() + + ret = ret_rref + self.assertEqual(ret, torch.add(torch.ones(n, n), 1)) + + @dist_init + def test_local_rref_no_fork(self): + local_rref = RRef(35) + self.assertEqual(local_rref.local_value(), 35) + + @dist_init + def test_local_value_not_on_owner(self): + # ensure that an error message is thrown if a user tries to call + # local_value() on a non-owning node. + next_rank = (self.rank + 1) % self.world_size + rref = rpc.remote( + worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + with self.assertRaisesRegex( + RuntimeError, ( + fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), " + fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), " + r"can't call localValue\(\) on user " + fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). 
" + fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)" + ) + ): + rref.local_value() + + @dist_init + def test_return_local_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + rref_list = rpc.rpc_sync( + worker_name(dst_rank), get_rref_list, args=([1, 2, 3],) + ) + + for rref in rref_list: + rpc.rpc_sync( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, 10), + ) + + rets = [ + rpc.rpc_sync( + rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref) + ) + for rref in rref_list + ] + + self.assertEqual(rets, [11, 12, 13]) + + @dist_init + def _test_rref_type(self, blocking): + + def launched_rpc(events): + expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner" + return any(e.name.startswith(expected_name) for e in events) + + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1)) + + with _profile() as p: + t = rref._get_type(blocking=blocking) + if not blocking: + t = t.wait() + + self.assertTrue(launched_rpc(p.function_events)) + expected_type = type(torch.ones(2)) + self.assertEqual(t, expected_type) + + futs = [] + + def verify(fut): + self.assertEqual(fut.value(), expected_type) + + with _profile() as p: + for _ in range(10): + t = rref._get_type(blocking=blocking) + if not blocking: + futs.append(t) + t.add_done_callback(verify) + t = t.wait() + self.assertEqual(t, expected_type) + + if not blocking: + # Note that cached calls with blocking=False all return the same + # cached original future. + first_fut = futs[0] + for f in futs[1:]: + self.assertTrue(f is first_fut) + # Ensure we never launch another RPC, other than for the very + # first call. + self.assertFalse(launched_rpc(p.function_events)) + self.assertEqual(t, type(torch.ones(2))) + + rref = rpc.remote(dst, MyClass, args=(0,)) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, MyClass) + + def test_rref_type_blocking(self): + self._test_rref_type(blocking=True) + + def test_rref_type_non_blocking(self): + self._test_rref_type(blocking=False) + + @dist_init + def _test_rref_type_with_error(self, blocking): + dst = worker_name((self.rank + 1) % self.world_size) + # 10 ms timeout + rref = rpc.remote(dst, raise_func) + # Blocking: error raised inline + if blocking: + with self.assertRaisesRegex(ValueError, "Expected error"): + rref._get_type(blocking=blocking) + else: + # Non-blocking: Immediately return future, block on wait + fut = rref._get_type(blocking=blocking) + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + + + def test_rref_type_with_error_blocking(self): + self._test_rref_type_with_error(blocking=True) + + def test_rref_type_with_error_non_blocking(self): + self._test_rref_type_with_error(blocking=False) + + @dist_init + def _test_rref_type_owner(self, blocking): + rref = RRef(torch.ones(2) + 1) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, type(torch.ones(2))) + + rref = RRef(MyClass(0)) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, MyClass) + + def test_rref_type_owner_blocking(self): + self._test_rref_type_owner(blocking=True) + + def test_rref_type_owner_non_blocking(self): + self._test_rref_type_owner(blocking=False) + + @staticmethod + def _slow_add(x, y): + time.sleep(1) + return x + y + + 
@dist_init + def test_rref_type_slow_init(self): + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1)) + self.assertEqual(rref._get_type(), type(torch.ones(2))) + + @dist_init + def test_owner_equality(self): + a = RRef(40) + b = RRef(50) + + other_rank = (self.rank + 1) % self.world_size + other_a = rpc.remote( + worker_name(other_rank), torch.add, args=(torch.ones(1), 1) + ) + other_b = rpc.remote( + worker_name(other_rank), torch.add, args=(torch.ones(1), 1) + ) + other_a.to_here() # to ensure clean termination + other_b.to_here() + + self.assertNotEqual(a.owner(), 23) + self.assertEqual(other_a.owner(), other_b.owner()) + self.assertNotEqual(a.owner(), other_a.owner()) + self.assertEqual(other_a.owner(), other_a.owner()) + self.assertEqual(other_a.owner(), other_b.owner()) + self.assertEqual(a.owner(), a.owner()) + self.assertEqual(a.owner(), b.owner()) + self.assertEqual(a.owner(), rpc.get_worker_info()) + x = {} + x[a.owner()] = a + x[other_a.owner()] = other_a + self.assertEqual(x[a.owner()], a) + self.assertEqual(x[b.owner()], a) + self.assertEqual(x[other_a.owner()], other_a) + self.assertEqual(x[other_b.owner()], other_a) + self.assertEqual(len(x), 2) + + @dist_init + def test_pass_local_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + dst_worker = worker_name(dst_rank) + + rref = RRef(40) + self.assertEqual( + rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90 + ) + self.assertEqual( + rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90 + ) + self.assertEqual( + rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90 + ) + + @dist_init + def test_remote_same_worker(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2) + ) + rref_b = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1) + ) + rref_c = rpc.remote( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4) + + @dist_init(setup_rpc=True) + def test_call_method_on_rref(self): + """ + Tests that it is possible to call an instance method on a remote object + by using rref.owner() as destination of the call. + """ + vals = [10, 2, 5, 7] + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + + # creates a remote object + rref = rpc.remote(dst_worker, MyClass, args=(vals[0],)) + + # modifies state of the remote object + rpc.rpc_sync( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[1]), + ) + rpc.rpc_async( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[2]), + ).wait() + rpc.remote( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[3]), + ).to_here() + + # queries state of the remote object + result = rpc.rpc_sync( + dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref) + ) + + self.assertEqual(result, sum(vals)) + + # Notice `rpc.api.shutdown()` accesses + # `_delete_all_user_and_unforked_owner_rrefs` through + # `torch.distributed.rpc.api`, so patching + # `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will + # not help. 
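+ # In other words, the name is looked up on `rpc.api` at call time, so the
+ # patch below targets `torch.distributed.rpc.api` directly. A rough,
+ # hypothetical sketch of the same idea without the decorator:
+ #     with mock.patch.object(torch.distributed.rpc.api,
+ #                            "_delete_all_user_and_unforked_owner_rrefs"):
+ #         rpc.shutdown(graceful=True)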
+ @mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs") + def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + initialize_pg(self.file_init_method, self.rank, self.world_size) + # Wait for all init to complete. + dist.barrier() + + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + torch.add, + args=(torch.ones(2, 2), 1), + ) + + import torch.distributed.rpc.api as api + + if ignore_leak: + api._ignore_rref_leak = True + rpc.shutdown(graceful=True) + else: + api._ignore_rref_leak = False + with self.assertRaisesRegex(RuntimeError, "Leaking RRef"): + rpc.shutdown(graceful=True) + + @dist_init(setup_rpc=False) + def test_rref_leak(self): + self._test_rref_leak(ignore_leak=False) + + @dist_init(setup_rpc=False) + def test_ignore_rref_leak(self): + self._test_rref_leak(ignore_leak=True) + + @dist_init + def test_rref_str(self): + rref1 = RRef(self.rank) + id_class = "GloballyUniqueId" + self.assertEqual( + f"OwnerRRef({id_class}(created_on={self.rank}, local_id=0))", rref1.__str__() + ) + + dst_rank = (self.rank + 1) % self.world_size + rref2 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + self.assertEqual( + rref2.__str__(), + "UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format( + id_class, self.rank + ), + ) + + @dist_init + def test_rref_get_future(self): + # Tests that we can obtain the future corresponding to the creation of + # the RRef on remote end + if self.rank == 0: + # Builtin + rref = rpc.remote(worker_name(1), torch.add, args=(1, 1)) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + # UDF + rref = rpc.remote(worker_name(1), foo_add, args=()) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + # Script + rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), )) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + + @dist_init + def test_rref_context_debug_info(self): + # This test checks local states that are modified by remote workers. + # This means that we would need barrier before and after every check. + # The barrier before the check makes sure that all previous states are + # cleared globally, the barrier after ensures that no following states + # change gets into the current check. + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # Check 1: local RRef does not update owners_ map or add a pending user. 
+ ################################################# + + rref1 = RRef(self.rank) + + # don't need a barrier here as local RRef is handled by this thread + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertIn("num_pending_users", info) + # RRef on local value is not added to context until shared across RPC + self.assertEqual(0, int(info["num_owner_rrefs"])) + self.assertEqual(0, int(info["num_pending_users"])) + # barrier after the check 1 + dist.barrier() + + # Check 2: Sharing RRef as an arg should update owners_ map + ########################################################### + + dst_rank = (self.rank + 1) % self.world_size + rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,)) + + # barrier before check 2 + wait_until_pending_futures_and_users_flushed() + dist.barrier() + + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertEqual(1, int(info["num_owner_rrefs"])) + # no pending users since the fork is finished + self.assertEqual(0, int(info["num_pending_users"])) + # barrier after check 2 + dist.barrier() + + # clear states for check 2 + rpc.rpc_sync(worker_name(dst_rank), clear_global_rref) + + # Wait for owner rref to be cleared. + while int(info["num_owner_rrefs"]) != 0: + info = _rref_context_get_debug_info() + time.sleep(0.1) + dist.barrier() + + # Check 3: rpc.remote call should update owners_ map + #################################################### + rref2 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + rref3 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + rref2.to_here() + rref3.to_here() + + # barrier before check 3 + wait_until_pending_futures_and_users_flushed() + dist.barrier() + + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertEqual(2, int(info["num_owner_rrefs"])) + # no pending users since the fork is finished + self.assertEqual(0, int(info["num_pending_users"])) + + # barrier after check 3 + dist.barrier() + + @dist_init + def test_disable_gil_profiling(self): + # test that rpc.enable_gil_profiling(false) will result in + # GIL wait time not being recorded. + + # GIL profiling should be disabled by default. + dst_rank = (self.rank + 1) % self.world_size + rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"]) + rpc.enable_gil_profiling(True) + rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertIn("agent.gil_average_wait_time_us", info) + + @dist_init(setup_rpc=False) + def test_local_shutdown(self): + # test that we can start RPC and then immediately locally shutdown + # without sending any messages. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + # pass in graceful=False to ensure that we don't wait for other workers. + rpc.shutdown(graceful=False) + + @dist_init + def test_debug_info(self): + # only test keys in this test case. 
Values should be covered by + # individual module debug info tests + import torch.distributed.autograd as dist_autograd + + info = _get_debug_info() + rref_info = _rref_context_get_debug_info() + agent_info = rpc.api._get_current_rpc_agent().get_debug_info() + autograd_info = dist_autograd._get_debug_info() + common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys() + self.assertEqual(0, len(common_keys)) + expected = {} + expected.update(rref_info) + expected.update(agent_info) + expected.update(autograd_info) + # NB: Key ordering is only preserved in python 3.6+. So here, we + # manually check keys are equal. + for key in expected.keys(): + self.assertIn(key, info.keys()) + + for key in info.keys(): + self.assertIn(key, expected.keys()) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_handle_send_exceptions(self): + # test that if a callee node has gone down, we raise an appropriate + # exception instead of just crashing. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc._set_rpc_timeout(10) + # This barrier is needed to ensure that some workers do not exit before + # others have been brought up. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + if self.rank == 1: + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + # allow destination worker to exit without joining + error_str = self.get_shutdown_error_regex() + wait_until_node_failure(dst_rank, error_str) + fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3)) + # Shutdown sequence is not very well defined and as a result + # we can see any of the error messages defined in get_shutdown_error_regex. + with self.assertRaisesRegex(RuntimeError, error_str): + fut.wait() + # exit all workers non-gracefully. + rpc.shutdown(graceful=False) + + @dist_init + def test_deadlock(self): + # this test is copied from https://github.com/pytorch/pytorch/issues/45089 + if self.rank == 1: + dst1 = worker_name((self.rank + 1) % self.world_size) + x = torch.ones(2) + y = torch.ones(2) + rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait() + + dist_initialized = dist.is_initialized() + if not dist_initialized: + dist.init_process_group( + backend="gloo", + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size, + ) + + @dist_init(setup_rpc=False) + def test_local_shutdown_with_rpc(self): + # test that we can start RPC, send RPCs, and then run local shutdown. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + n = self.rank + 1 + dst_rank = n % self.world_size + rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + # A barrier is needed to ensure that all RPCs are processed. + # Otherwise, some RPCs can timeout since the receiving end + # has terminated. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + # pass in graceful=False to ensure that we don't wait for other workers. 
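+ # (A graceful shutdown would block until every worker agrees to exit;
+ # graceful=False only tears down the local agent.)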
+ rpc.shutdown(graceful=False) + + @dist_init(setup_rpc=False) + def test_set_and_get_default_rpc_timeout(self): + timeout = 0.5 + + # A new `RpcBackendOptions` is constructed + # when accessing `self.rpc_backend_options`. + rpc_backend_options = self.rpc_backend_options + rpc_backend_options.rpc_timeout = timeout + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + set_timeout = rpc.get_rpc_timeout() + self.assertEqual(timeout, set_timeout) + rpc.shutdown() + + @dist_init + def test_default_timeout_used(self): + """ + Tests that if no timeout is passed into rpc_async and rpc_sync, then the + default timeout is used. + """ + dst_rank = (self.rank + 1) % self.world_size + rpc._set_rpc_timeout(0.001) # 1 ms + # futures should time out and be marked with an exception indicating it as such. + futs = [ + rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()) + for _ in range(10) + ] + expected_error = self.get_timeout_error_regex() + for fut in futs: + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # ensure that if a new timeout is set old futures don't time out but new ones do. + rpc._set_rpc_timeout(200) # 200 seconds + # create a longstanding RPC. + fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,)) + # now, set a short timeout. + rpc._set_rpc_timeout(0.001) + # fut2 should time out, fut1 should not. + fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,)) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut2.wait() + fut1.wait() + + # Zero timeout means infinity, so future should run to completion. + rpc._set_rpc_timeout(0) + rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait() + + # reset to default timeout so shutdown messages can process cleanly. + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init + def test_rpc_timeouts(self): + # TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803) + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + timeout = 0.1 # 100 ms + expected_error = self.get_timeout_error_regex() + # Test async UDF + fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if there is no timeout and we use the default + # RPC timeout. + rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait() + + # Test sync UDF + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout) + + # Ensure run to completion if there is no timeout and we use the default + # RPC timeout. + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,)) + + # If we set a default timeout for RPCs, it should be respected, though + # still overridden if we pass in a different timeout to the APIs. + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,)) + + # The RPCs should run to completion since we override the timeout. 
+ rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait() + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5) + # Passing in a zero timeout should ensure that the RPC won't time out. + rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait() + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0) + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + def test_dist_init_decorator(self): + @dist_init(setup_rpc=False) + def test_func(self): + return "expected result" + + self.assertEqual(test_func(self), "expected result") + + @dist_init + def test_func(self): + return "expected result" + + self.assertEqual(test_func(self), "expected result") + + def test_use_rpc_pickler(self): + class TestPickler: + pass + + test_pickler = TestPickler() + with _use_rpc_pickler(test_pickler): + self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler) + self.assertTrue( + torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler + ) + + @dist_init + def test_wait_all(self): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1)) + self.assertTrue(len(_thread_local_var.future_list) == 1) + self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future)) + self.assertTrue(fut.done()) + self.assertEqual(fut.wait(), torch.ones(2, 2) + 1) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_multiple_call(self): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + for i in range(20): + fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1)) + res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1)) + self.assertEqual(res, torch.ones(i, i) + 1) + self.assertEqual(fut.wait(), torch.ones(i, i) + 1) + self.assertTrue(len(_thread_local_var.future_list) == 20) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_timeout(self): + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + timeout = 0.1 # 100 ms + fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_raise_in_user_func(self): + with self.assertRaises(ValueError): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, raise_func) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_raise_in_body(self): + with self.assertRaises(ValueError): + with _wait_all(): + raise_func() + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_custom_exception_throw_during_reconstruction(self): + """ + Test that we still throw info about the remote side exception even when + we cannot recreate it on client side. 
+ """ + initialize_pg(self.file_init_method, self.rank, self.world_size) + if self.rank != 0: + exc_caught = False + dst = worker_name(0) + try: + rpc.rpc_sync(dst, custom_raise_func, args=()) + except RuntimeError as e: + exc_caught = True + msg = str(e) + print(f"Got msg {msg}") + self.assertTrue("Original exception on remote side was" in msg) + self.assertTrue("CustomException" in msg) + except BaseException as e: + raise RuntimeError( + f"Failure - expected RuntimeError, got {e}" + ) from e + finally: + self.assertTrue(exc_caught) + + dist.barrier() + + + timed_out_rpc_event = None + + @staticmethod + def timed_out_rpc(): + RpcTest.timed_out_rpc_event.wait() + + @dist_init + def test_wait_all_exit_early_python(self): + # Initialize the event in the subprocess. + RpcTest.timed_out_rpc_event = Event() + + # Wait for all processes to initialize event. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + dst = worker_name((self.rank + 1) % self.world_size) + fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc) + fut2 = rpc.rpc_async(dst, raise_func) + fut3 = rpc.rpc_async(dst, raise_func) + + # We should receive the error from fut2 + with self.assertRaisesRegex(ValueError, expected_err): + torch.futures.wait_all([fut1, fut2, fut3]) + + # Unblock RPC thread for fut1 + RpcTest.timed_out_rpc_event.set() + + @dist_init + def test_wait_all_exit_early_builtin(self): + # Initialize the event in the subprocess. + RpcTest.timed_out_rpc_event = Event() + + # Wait for all processes to initialize event. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + dst = worker_name((self.rank + 1) % self.world_size) + fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc) + fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5))) + fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5))) + + # We should receive the error from fut2 + with self.assertRaisesRegex(RuntimeError, "size of tensor"): + torch.futures.wait_all([fut1, fut2, fut3]) + + # Unblock RPC thread for fut1 + RpcTest.timed_out_rpc_event.set() + + @dist_init + def test_wait_all_exit_early_script_function(self): + # Initialize the event in the subprocess. + RpcTest.timed_out_rpc_event = Event() + + # Wait for all processes to initialize event. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + dst = worker_name((self.rank + 1) % self.world_size) + fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc) + fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,)) + fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,)) + + # We should receive the error from fut2 + with self.assertRaisesRegex(RuntimeError, expected_err): + torch.futures.wait_all([fut1, fut2, fut3]) + + # Unblock RPC thread for fut1 + RpcTest.timed_out_rpc_event.set() + + + @dist_init + def test_function_not_on_callee(self): + # test that if a function does not exist on a callee, we don't crash, + # instead we get an AttributeError indicating that the func does not exist. + this_module = sys.modules[__name__] + caller_worker = "worker0" + callee_worker = "worker1" + + if self.rank == 1: + # Use delattr to remove the binding of a func on this nodes + delattr(this_module, "foo_add") + # notify remote end that we have removed it. + rpc.rpc_sync(caller_worker, set_value, args=(self.rank,)) + + if self.rank == 0: + # func exists on caller, but not callee. + # wait for remote end to remove the binding of foo_add func. 
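+ # (wait_for_value_future() blocks until worker1 has signalled, via the
+ # set_value RPC above, that foo_add was deleted on its side.)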
+ wait_for_value_future() + # Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error. + self.assertTrue(hasattr(this_module, "foo_add")) + with self.assertRaisesRegex( + RuntimeError, "RPC pickler does not serialize" + ): + rpc.rpc_sync(callee_worker, foo_add, args=()) + + @dist_init + def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + a = MyClass(1) + b = MyClass(2) + + # This is to make Python not garbage collect a and b. + a.other = b + b.other = a + + n = self.rank + a.rref = rpc.remote( + dst_worker_name, + torch.add, + args=(torch.ones(n, n), 2) + ) + + @dist_init(setup_rpc=False) + def test_use_rref_after_shutdown(self): + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + # pass in graceful=True to ensure that local UserRRefs are deleted. + rpc.shutdown(graceful=True) + + with self.assertRaisesRegex( + RuntimeError, "Cannot call to_here\\(\\) on it after deletion." + ): + rref.to_here() + + with self.assertRaisesRegex( + RuntimeError, "Cannot call fork an UserRRef after deletion." + ): + import torch.distributed.rpc.internal as internal + internal.serialize(rref) + + @staticmethod + def _return_gpu_tensor(): + return torch.rand(3, 3).cuda(0) + + @staticmethod + def _return_gpu_tensor_list(): + return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)] + + @staticmethod + def _gpu_tensor_list_arg(tensor_list): + return torch.rand(3, 3) + + def _create_rref(self): + owner_rank = (self.rank + 2) % self.world_size + return rpc.remote( + worker_name(owner_rank), + torch.add, + args=(torch.zeros(2, 2), 1) + ) + + @dist_init + def test_user_rrefs_confirmed(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret = rpc.rpc_sync( + worker_name(dst_rank), + check_rref_confirmed, + args=(rref,) + ) + self.assertEqual(ret, True) + + @dist_init + def test_user_rrefs_confirmed_remote(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret_rref = rpc.remote( + worker_name(dst_rank), + check_rref_confirmed, + args=(rref,) + ) + self.assertEqual(ret_rref.to_here(), True) + + @dist_init + def test_rref_py_pickle_not_supported(self): + local_rref = RRef(35) + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"): + torch.save(local_rref, fname) + + @dist_init + def test_remote_throw(self): + rref = rpc.remote(worker_name((self.rank + 1) % self.world_size), + raise_or_inc, + args=(torch.ones(2),)) + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + rref.to_here() + + @dist_init + def test_non_cont_tensors(self): + if self.rank == 0: + # Create a non-contiguous tensor. + t = torch.rand(5, 5) + t_view = t.narrow(1, 2, 2) + self.assertFalse(t_view.is_contiguous()) + t_cont = t_view.contiguous() + self.assertTrue(t_cont.is_contiguous()) + self.assertEqual(t_view, t_cont) + + # Send non-cont tensor over RPC. + next_rank = (self.rank + 1) % self.world_size + t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont)) + + # Verify the returned tensor. 
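+ # (non_cont_test presumably validates both tensors on the callee and echoes
+ # the non-contiguous view back; the checks below confirm the values survived
+ # the round trip and that contiguity was not silently forced.)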
+ self.assertEqual(t_view, t_ret) + self.assertFalse(t_ret.is_contiguous()) + + @dist_init + def test_callback_simple(self): + set_by_cb = concurrent.futures.Future() + n = self.rank + 1 + + def callback(fut): + ret = fut.wait() + self.assertEqual(ret, torch.ones(n, n) * 2) + set_by_cb.set_result(ret.clone() + 1) + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_callback_wrong_arg_num(self): + set_by_cb = concurrent.futures.Future() + n = self.rank + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + cb_fut = fut.then(my_function) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + with self.assertRaisesRegex( + RuntimeError, + "my\\_function\\(\\) missing 2 required positional arguments" + ): + cb_fut.wait() + + @dist_init + def test_callback_wrong_arg_type(self): + dst = worker_name((self.rank + 1) % self.world_size) + + fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1)) + fut1 = fut0.then(lambda x: x + 1) + + with self.assertRaisesRegex( + RuntimeError, + "unsupported operand type\\(s\\) for \\+" + ): + fut1.wait() + + @dist_init + def test_callback_multi(self): + num_cbs = 10 + n = self.rank + 1 + + def callback(idx, fut): + ret = fut.wait() + self.assertEqual(ret, torch.ones(n, n) * 2) + return ret + idx + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + cb_futs = [] + for idx in range(num_cbs): + cb_futs.append(fut.then(partial(callback, idx))) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + for idx in range(num_cbs): + self.assertEqual( + cb_futs[idx].wait(), + torch.ones(n, n) * 2 + idx + ) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_callback_chain(self): + n = self.rank + 1 + dst = worker_name(n % self.world_size) + + def callback(fut): + return fut.wait() + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), 1) + ) + + num_cbs = 20 + for _ in range(num_cbs): + fut = fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs) + + @dist_init + def test_callback_in_rpc(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, + add_use_future_cb, + args=(dst2, torch.ones(2, 2), 1, 2) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1 + 2) + + @dist_init + def test_callback_with_ret(self): + dst = worker_name((self.rank + 1) % self.world_size) + + def callback(fut0): + fut2 = rpc.rpc_async( + dst, + torch.add, + args=(fut0.wait(), 1) + ).then(lambda fut1: fut1.wait() + 1) + + return fut2.wait() + + fut3 = rpc.rpc_async( + dst, + torch.add, + args=(torch.ones(2, 2), 1) + ).then(callback) + + self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3) + + @dist_init + def test_callback_with_error(self): + dst = worker_name((self.rank + 1) % self.world_size) + + def callback(fut0): + with self.assertRaisesRegex(ValueError, "Expected error"): + fut0.wait() + raise RuntimeError("Another expected error") + + fut1 = rpc.rpc_async(dst, raise_func).then(callback) + with self.assertRaisesRegex(RuntimeError, "Another expected error"): 
+ fut1.wait() + + @dist_init + def test_callback_none(self): + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + TypeError, + "incompatible function arguments." + ): + rpc.rpc_async(dst, raise_func).then(None) + + @dist_init + def test_add_done_callback(self): + set_by_cb = False + n = self.rank + 1 + + def callback(fut): + nonlocal set_by_cb + fut.wait() + set_by_cb = True + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + fut.add_done_callback(callback) + fut_then = fut.then(lambda _: True) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + # We have no guarantee that the add_done_callback fn will execute before the test finishes. + # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback + fut_then.wait() + self.assertTrue(set_by_cb) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_mark_future_twice(self): + fut = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + torch.add, + args=(torch.zeros(2, 2), 1) + ) + self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1) + with self.assertRaisesRegex( + RuntimeError, + "Future can only be marked completed once" + ): + fut.set_result(1) + + @dist_init + def test_pickle_future(self): + fut = torch.futures.Future() + errMsg = "Can not pickle torch.futures.Future" + + dst = worker_name((self.rank + 1) % self.world_size) + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.rpc_sync(dst, fail_on_fut, args=(fut,)) + + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.rpc_async(dst, fail_on_fut, args=(fut,)) + + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.remote(dst, fail_on_fut, args=(fut,)) + + @dist_init + def test_future_done(self): + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1)) + fut.wait() + self.assertTrue(fut.done()) + + @dist_init + def test_future_done_exception(self): + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, raise_func) + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + self.assertTrue(fut.done()) + + def _test_future_cb(self, func): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, + func, + args=(dst2, torch.ones(2, 2), 1, 2) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1 + 2) + + @dist_init + def test_future_in_rpc(self): + self._test_future_cb(add_use_future_set_result) + + @dist_init + def test_future_nested_callback(self): + self._test_future_cb(add_use_future_nested_cb) + + def _test_async_function_raise(self, mode): + with self.assertRaisesRegex(RuntimeError, "Expected error"): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + async_raise_func, + mode + ) + + @dist_init + def test_async_function_raise(self): + self._test_async_function_raise(RPCExecMode.SYNC) + + @dist_init + def test_async_function_raise_async(self): + self._test_async_function_raise(RPCExecMode.ASYNC) + + @dist_init + def test_async_function_raise_remote(self): + self._test_async_function_raise(RPCExecMode.REMOTE) + + def _test_async_function_wrong_return_type(self, mode): + errMsg = ( + "Functions decorated with @rpc\\.async_function must return a " + "torch\\.futures\\.Future 
object," + ) + with self.assertRaisesRegex(RuntimeError, errMsg): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + async_wrong_type, + mode + ) + + @dist_init + def test_async_function_wrong_return_type(self): + self._test_async_function_wrong_return_type(RPCExecMode.SYNC) + + @dist_init + def test_async_function_wrong_return_type_async(self): + self._test_async_function_wrong_return_type(RPCExecMode.ASYNC) + + @dist_init + def test_async_function_wrong_return_type_remote(self): + self._test_async_function_wrong_return_type(RPCExecMode.REMOTE) + + @dist_init + def test_async_function_simple(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + def _test_async_function(self, fn, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + args = (dst2, torch.ones(2, 2), 1, 2) + ret = self._run_func_in_mode(dst1, fn, mode, args=args) + self.assertEqual(ret, torch.ones(2, 2) + 3) + + @dist_init + def test_async_function_with_future_ctor(self): + self._test_async_function(async_add_with_future_ctor) + + @dist_init + def test_async_function_with_future_ctor_remote(self): + self._test_async_function( + async_add_with_future_ctor, + RPCExecMode.REMOTE + ) + + @dist_init + def test_async_function_chained(self): + self._test_async_function(async_add_chained) + + @dist_init + def test_async_function_chained_remote(self): + self._test_async_function(async_add_chained, RPCExecMode.REMOTE) + + @dist_init + def test_async_function_nested(self): + self._test_async_function(async_add_nested) + + @dist_init + def test_async_function_nested_remote(self): + self._test_async_function(async_add_nested, RPCExecMode.REMOTE) + + @dist_init + def test_async_static_method(self): + self._test_async_function(AsyncExecutionClass.static_async_add) + + @dist_init + def test_async_static_method_remote(self): + self._test_async_function( + AsyncExecutionClass.static_async_add, + RPCExecMode.REMOTE + ) + + @dist_init + def test_async_class_method(self): + self._test_async_function(AsyncExecutionClass.class_async_add) + + @dist_init + def test_async_class_method_remote(self): + self._test_async_function( + AsyncExecutionClass.class_async_add, + RPCExecMode.REMOTE + ) + + def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + rref = rpc.remote(dst1, AsyncExecutionClass) + + x = torch.ones(2, 2) + y = torch.ones(2, 2) + 1 + if mode == RPCExecMode.SYNC: + ret = rref.rpc_sync().static_async_add(dst2, x, x, y) + ret += rref.rpc_sync().class_async_add(dst2, x, x, y) + ret += rref.rpc_sync().bound_async_add(dst2, x, x, y) + elif mode == RPCExecMode.ASYNC: + ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait() + ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait() + ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait() + elif mode == RPCExecMode.REMOTE: + ret = rref.remote().static_async_add(dst2, x, x, y).to_here() + ret += rref.remote().class_async_add(dst2, x, x, y).to_here() + ret += rref.remote().bound_async_add(dst2, x, x, y).to_here() + + self.assertEqual(ret, 3 * 4 * x) + + @dist_init + def test_async_class_rref_proxy(self): + self._test_test_async_class_rref_proxy() + + @dist_init 
+ def test_async_class_rref_proxy_async(self): + self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC) + + @dist_init + def test_async_class_rref_proxy_remote(self): + self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE) + + def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + num = 20 + step = 3 + args = (dst2, torch.ones(2, 2), num, step) + ret = self._run_func_in_mode(dst1, fn, mode, args=args) + self.assertEqual(ret, torch.ones(2, 2) + num * step) + + @dist_init + def test_async_function_multi_chained(self): + self._test_async_function_multi(async_add_chained_multi) + + @dist_init + def test_async_function_multi_chained_async(self): + self._test_async_function_multi( + async_add_chained_multi, + RPCExecMode.ASYNC + ) + + @dist_init + def test_async_function_multi_chained_remote(self): + self._test_async_function_multi( + async_add_chained_multi, + RPCExecMode.REMOTE + ) + + @dist_init + def test_async_function_multi_fanout(self): + self._test_async_function_multi(async_add_multi_fanout) + + @dist_init + def test_async_function_multi_fanout_async(self): + self._test_async_function_multi( + async_add_multi_fanout, + RPCExecMode.ASYNC + ) + + @dist_init + def test_async_function_multi_fanout_remote(self): + self._test_async_function_multi( + async_add_multi_fanout, + RPCExecMode.REMOTE + ) + + def _test_return_future(self, mode): + with self.assertRaisesRegex( + RuntimeError, + "Can not pickle torch.futures.Future" + ): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + return_future, + mode + ) + + @dist_init + def test_return_future(self): + self._test_return_future(RPCExecMode.SYNC) + + @dist_init + def test_return_future_async(self): + self._test_return_future(RPCExecMode.ASYNC) + + @dist_init + def test_return_future_remote(self): + self._test_return_future(RPCExecMode.REMOTE) + + @dist_init + def test_rref_timeout(self): + # This test is similar to ones in FaultyProcessGroupTest, but is meant to be + # run with other backends besides ProcessGroup. + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # 10 ms timeout + rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01) + # Future corresponding to the remote creation should time out. + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref._get_future().wait() + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + + wait_until_owners_and_forks_on_rank(1, 1, rank=1) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614." + ) + def test_init_pg_then_rpc(self): + dist.init_process_group( + backend="gloo", + init_method=self.init_method, + rank=self.rank, + world_size=self.world_size, + ) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + # Test RPC. 
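+ # (Initializing the process group first must not interfere with RPC setup;
+ # the calls below exercise both stacks side by side.)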
+ next_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test PG + dist.barrier() + + rpc.shutdown() + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614." + ) + def test_init_rpc_then_pg(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.init_process_group( + backend="gloo", + init_method=self.init_method, + rank=self.rank, + world_size=self.world_size, + ) + + # Test RPC. + next_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test PG + dist.barrier() + + rpc.shutdown() + + @dist_init + def test_wait_all_with_exception(self): + futs = [] + dst = worker_name((self.rank + 1) % self.world_size) + for _ in range(10): + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + ret = torch.futures.wait_all(futs) + + @dist_init + def test_wait_all_with_partial_exception(self): + futs = [] + dst = worker_name((self.rank + 1) % self.world_size) + for _ in range(10): + futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1))) + + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + ret = torch.futures.wait_all(futs) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491", + ) + def test_init_rpc_twice(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + # Wait for all init to complete. + dist.barrier() + + # Use a different file name for the next initialization + new_backend_options = self.rpc_backend_options + new_backend_options.init_method += "init_2" + + # Ensure rpc initialization works again. + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=new_backend_options, + ) + + # Verify RPCs work after re-init. 
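+ # (torch.add covers the builtin-operator path and foo_add the Python-UDF
+ # path, so both call paths are re-checked after the second init_rpc.)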
+ dst = worker_name((self.rank + 1) % self.world_size) + rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1)) + rpc.rpc_sync(dst, foo_add, args=()) + + rpc.shutdown() + + def test_wrong_types(self): + with self.assertRaisesRegex( + TypeError, + "Argument backend must be a member of BackendType", + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend="TENSORPIPE", + ) + + with self.assertRaisesRegex( + TypeError, + "Argument rpc_backend_options must be an instance of RpcBackendOptions", + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend=self.rpc_backend, + rpc_backend_options={"init_method": self.init_method} + ) + + def test_cannot_infer_backend_from_options(self): + # An exception should be raised if the backend isn't specified but + # options are given which are not an instance of any of the known + # agents' option classes. + rpc_backend_options = FooBackendOptions(self.init_method) + + with self.assertRaisesRegex(TypeError, "Could not infer backend for options"): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + # Do _not_ pass backend. + rpc_backend_options=rpc_backend_options, + ) + + @dist_init + def test_owner_rref_backward(self): + dst = worker_name((self.rank + 1) % self.world_size) + t1 = torch.rand(10, 10, requires_grad=True) + rref = rpc.RRef(t1.sum() + t1.sum()) + rref.backward() + expected_grad = torch.ones_like(t1) * 2 + self.assertEqual(expected_grad, t1.grad) + + with dist_autograd.context() as context_id: + t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1)) + rref = rpc.RRef(t2.sum()) + rref.backward(context_id) + self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1]) + + # Double backward. + with dist_autograd.context() as context_id: + t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1)) + rref = rpc.RRef(t2.sum()) + rref.backward(context_id, retain_graph=True) + rref.backward(context_id) + self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1]) + + # Test errors. 
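+ # (Each case below exercises a distinct failure mode: a tensor that does not
+ # require grad, a non-scalar output, an unknown dist_autograd context id, and
+ # an RRef that does not hold a tensor at all.)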
+ with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"): + rpc.RRef(torch.rand(10)).backward() + + with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"): + rpc.RRef(torch.rand(10, requires_grad=True)).backward() + + with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"): + rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100) + + with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"): + rpc.RRef("foo").backward() + + @staticmethod + def _sum(x): + return x.sum() + + @staticmethod + def _identity(x): + return x + + @dist_init + def test_user_rref_backward(self): + dst = worker_name((self.rank + 1) % self.world_size) + t = torch.rand(10, requires_grad=True) + with dist_autograd.context() as context_id: + rref = rpc.remote(dst, RpcTest._sum, args=(t,)) + rref.backward(context_id, retain_graph=True) + rref.backward(context_id) + self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t]) + + with dist_autograd.context() as context_id: + rref = rpc.remote(dst, RpcTest._identity, args=("foo",)) + with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"): + rref.backward(context_id) + + with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"): + rref.backward() + + @dist_init(setup_rpc=False) + def test_shutdown_errors(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + if self.rank != 0: + og_func = rpc.api._broadcast_to_followers + og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs + + # Monkey-patch _broadcast_to_followers to fail, which would ensure + # _all_gather on leader raises an exception. + def raise_error(sequence_id, objects_map): + og_func(sequence_id, objects_map) + raise RuntimeError('simulation') + + # Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail, + # which would ensure barrier is not called on followers. 
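+ # (rref_error below replaces that hook, so shutdown on the followers raises
+ # 'simulation rref' before the barrier is ever reached.)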
+ def rref_error(): + raise RuntimeError('simulation rref') + + try: + rpc.api._broadcast_to_followers = raise_error + rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error + with self.assertRaisesRegex(RuntimeError, 'simulation rref'): + rpc.shutdown() + finally: + rpc.api._broadcast_to_followers = og_func + rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func + else: + with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'): + rpc.shutdown() + + dist.barrier() + + @dist_init + def test_my_parameter_server(self): + self._my_parameter_server(False) + + +class CudaRpcTest(RpcAgentTestFixture): + + @skip_if_lt_x_gpu(2) + @dist_init + def test_profiler_remote_cuda(self): + if self.rank != 1: + return + + dst_cuda_0 = (self.rank + 1) % self.world_size + dst_cuda_1 = (self.rank + 2) % self.world_size + dst_worker_cuda_0 = worker_name(dst_cuda_0) + dst_worker_cuda_1 = worker_name(dst_cuda_1) + + with _profile(use_cuda=True) as p: + fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, )) + fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, )) + fut1.wait() + fut2.wait() + + def get_name(event): + return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):] + + function_events = p.function_events + for event in function_events: + if event.is_async: + self.assertEqual(0, event.cuda_time_total) + self.assertEqual([], event.kernels) + self.assertEqual(0, event.cuda_time) + else: + if event.node_id == 1: + continue + self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1]) + if get_name(event) in EXPECTED_REMOTE_EVENTS: + self.assertGreater(event.cuda_time_total, 0) + self.assertEqual(1, len(event.kernels)) + kernel = event.kernels[0] + if event.node_id == dst_cuda_0: + self.assertEqual(kernel.device, 0) + if event.node_id == dst_cuda_1: + self.assertEqual(kernel.device, 1) + self.assertGreater(event.cuda_time, 0) + + # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled + # events. + remote_events = [event for event in function_events if event.is_remote] + remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS] + self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS)) + + +class TensorPipeAgentRpcTest(RpcAgentTestFixture, RpcTestCommon): + + def test_mismatched_type_for_options(self): + # An exception should be raised if the options are not an instance of + # TensorPipeRpcBackendOptions. + rpc_backend_options = FooBackendOptions(self.init_method) + + with self.assertRaisesRegex( + TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`" + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend=rpc.BackendType.TENSORPIPE, + rpc_backend_options=rpc_backend_options, + ) + + def test_infer_backend_from_options(self): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.init_method, + _transports=tp_transports() + ) + + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + # Do _not_ pass backend. + rpc_backend_options=rpc_backend_options, + ) + + self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent) + + # FIXME Merge this test with the corresponding one in RpcTest. 
+ @dist_init(setup_rpc=False) + def test_set_and_get_num_worker_threads(self): + NUM_THREADS = 27 + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=NUM_THREADS, + _transports=tp_transports(), + ) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS) + rpc.shutdown() + + # FIXME Merge this test with the corresponding one in RpcTest. + @dist_init(setup_rpc=False) + def test_tensorpipe_set_default_timeout(self): + # Set a high timeout since it doesn't affect test runtime and ensures + # the test doesn't erroneously timeout due to slow machines. + timeout = 100 + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + rpc_timeout=timeout, + _transports=tp_transports(), + ) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + default_timeout = rpc.get_rpc_timeout() + self.assertEqual(default_timeout, timeout) + rpc.shutdown() + + # FIXME Merge this test with the corresponding one in RpcTest. + @dist_init(setup_rpc=False) + def test_tensorpipe_options_throw_on_timedelta_timeout(self): + from datetime import timedelta + + timeout = timedelta() + # Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails + with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + rpc_timeout=timeout, + ) + + @dist_init + def _test_rref_get_type_timeout(self, blocking): + # Test where we try to get the type of a RRef from an owner, but RRef + # creation is slower than timeout passed into _get_type. + dst_rank = (self.rank + 1) % self.world_size + dst = worker_name(dst_rank) + slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True)) + timeout = 0.5 + expected_err = self.get_timeout_error_regex() + # Blocking: blocks on inline call + if blocking: + with self.assertRaisesRegex(RuntimeError, expected_err): + slow_rref._get_type(timeout=timeout, blocking=blocking) + # Non-blocking: blocks on wait + else: + fut = slow_rref._get_type(timeout=timeout, blocking=blocking) + with self.assertRaisesRegex(RuntimeError, expected_err): + fut.wait() + + # FIXME We wait until the remote completed creating the OwnerRRef + # because there's currently a race if we shut down RPC before that. 
+ slow_rref.to_here() + + def test_rref_get_type_timeout_blocking(self): + self._test_rref_get_type_timeout(blocking=True) + + def test_rref_get_type_timeout_non_blocking(self): + self._test_rref_get_type_timeout(blocking=False) + + @dist_init + def test_op_with_invalid_args(self): + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema" + ): + rpc.rpc_sync(dst, torch.add, args=()) + + def _test_rref_proxy_timeout(self, rref_proxy_api): + dst_rank = (self.rank + 1) % self.world_size + dst = worker_name(dst_rank) + rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), )) + # Ensure RRef is created on remote node. + rref.to_here() + rref_api = getattr(rref, rref_proxy_api) + self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}") + expected_error = self.get_timeout_error_regex() + timeout = 2 + with self.assertRaisesRegex(RuntimeError, expected_error): + result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2)) + if rref_api == rref.rpc_async: + result.wait() + elif rref_api == rref.remote: + result._get_future().wait() + + # Case where rpc.remote() is stuck and exceeds timeout + slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True)) + timeout = 0.01 + rref_api = getattr(slow_rref, rref_proxy_api) + # Note that even when we call rref.rpc_async() in this case, we + # time out in future creation, not waiting for future. This is because + # rref proxy function calls rref._get_type before returning future, + # which blocks on the RRef being created on owner node, until the + # specified timeout. + with self.assertRaisesRegex(RuntimeError, expected_error): + result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2)) + # rpc_async returns immediately and surface a timeout through wait() + if rref_api == slow_rref.rpc_async: + result.wait() + + # FIXME We wait until the remote completed creating the OwnerRRef + # because there's currently a race if we shut down RPC before that. 
+ slow_rref.to_here() + + @dist_init + def test_rref_proxy_timeout(self): + for rpc_api in ["rpc_sync", "rpc_async", "remote"]: + self._test_rref_proxy_timeout(rpc_api) + + @dist_init + def test_send_to_rank_sparse(self): + dst_rank = (self.rank + 1) % self.world_size + + # Test sparse tensor + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + x = build_sparse_tensor() + y = build_sparse_tensor() + expected_tensor = (x + y) + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y)) + self.assertEqual(expected_tensor, ret) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + x = build_sparse_tensor(coalesce=True) + y = build_sparse_tensor(coalesce=True) + expected_tensor = (x + y) + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y)) + self.assertEqual(expected_tensor, ret) + + @dist_init + def test_self_py_udf_remote_sparse(self): + self._self_py_udf_remote( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_rpc_arg_sparse(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_rpc_arg( + dst, + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_self_rpc_arg_sparse(self): + self._self_remote_rref_as_rpc_arg( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_remote_arg_sparse(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_remote_arg( + dst, + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_self_remote_arg_sparse(self): + self._self_remote_rref_as_remote_arg( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + def test_world_size_one_sparse(self): + self._world_size_one( + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_multi_rpc_sparse(self): + self._multi_rpc(True) + + def test_wait_all_workers_sparse(self): + self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor()) + + def test_wait_all_workers_twice_sparse(self): + self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor()) + + @dist_init + def test_py_sparse_tensors_in_container(self): + n = self.rank + 1 + dst_rank = n % self.world_size + a = [build_sparse_tensor(), build_sparse_tensor()] + ret = rpc.rpc_sync( + worker_name(dst_rank), my_container_sum, args=(a,) + ) + self.assertEqual(ret, my_container_sum(a)) + + @dist_init + def test_nested_rpc_sparse(self): + self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2) + + @dist_init + def test_stress_heavy_rpc_sparse(self): + self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),)) + + @dist_init + def test_builtin_remote_ret_sparse(self): + self._builtin_remote_ret( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 2 + ) + + @dist_init + def test_builtin_remote_self_sparse(self): + self._builtin_remote_self( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 2 + ) + + @dist_init + def test_multi_builtin_remote_ret_sparse(self): + self._test_multi_remote_call( + torch.add, True, + args_fn=RpcTest._multi_args_fn + ) + + @dist_init + def test_multi_py_udf_remote_sparse(self): + 
self._test_multi_remote_call( + my_function, + True, + kwargs_fn=RpcTest._multi_kwargs_fn + ) + + @dist_init + def test_py_rref_args_sparse(self): + self._py_rref_args( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 4 + ) + + @dist_init + def test_py_rref_args_user_share_sparse(self): + self._py_rref_args_user_share( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 6 + ) + + @dist_init + def test_py_rpc_rref_args_sparse(self): + self._py_rpc_rref_args( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 6 + ) + + @dist_init + def test_nested_remote_sparse(self): + self._nested_remote( + nested_remote_sparse, + build_sparse_tensor() + build_sparse_tensor() + ) + + @dist_init + def test_nested_rref_sparse(self): + self._nested_rref( + nested_rref_sparse, + build_sparse_tensor() * 2, + build_sparse_tensor() * 2 + ) + + @dist_init + def test_nested_rref_stress_sparse(self): + self._nested_rref_stress( + nested_rref_sparse, + build_sparse_tensor() * 2, + build_sparse_tensor() * 2 + ) + + @dist_init + def test_my_parameter_server_sparse(self): + self._my_parameter_server(True) + + # Test init_rpc without world_size argument + @dist_init(setup_rpc=False) + def test_dynamic_rpc_init_rpc(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + # Dynamic RPC new ranks communicate with existing ranks + @dist_init(setup_rpc=False) + def test_dynamic_rpc_new_rank_can_communicated_with_existing_rank(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + result = rpc.rpc_sync(worker_name(0), torch.add, args=(torch.tensor(1), torch.tensor(1))) + self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + # Dynamic RPC existing ranks can communicate with new ranks + @dist_init(setup_rpc=False) + def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + # Rest of ranks join after barrier + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.barrier() + if self.rank == 0: + for i in range(1, self.world_size): + 
result = rpc.rpc_sync(worker_name(i), torch.add, args=(torch.tensor(1), torch.tensor(1))) + self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + # Dynamic RPC existing ranks can communicate with new ranks using CUDA rpc + @skip_if_lt_x_gpu(2) + @dist_init(setup_rpc=False) + def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank_cuda(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + options = self.rpc_backend_options + for i in range(1, self.world_size): + dst = worker_name(i) + options.set_device_map(dst, {1: 0}) + options.set_device_map(dst, {0: 1}) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + # Rest of ranks join after barrier + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # TODO: Cuda RPC is failing due to: + # terminate called after throwing an instance of 'c10::Error' + # what(): 0 <= device && static_cast(device) < device_allocator.size() + # INTERNAL ASSERT FAILED at "../c10/cuda/CUDACachingAllocator.cpp":1937, + # please report a bug to PyTorch. Allocator not initialized for device 1: did you call init? + # dist.barrier() + # if self.rank == 0: + # for i in range(1, self.world_size): + # x = torch.ones(2) + # result_on_device_0 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(0), 1)) + # result_on_device_1 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(1), 1)) + # self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_0) + # self.assertEqual(torch.device('cuda:0'), result_on_device_0.device) + # self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_1) + # self.assertEqual(torch.device('cuda:1'), result_on_device_1.device) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + @dist_init(setup_rpc=False) + def test_dynamic_rpc_init_rpc_without_rank(self): + # default initialization uses file init + with self.assertRaisesRegex(ValueError, "rank parameter missing"): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=self.rpc_backend_options, + ) + + # env init + with self.assertRaisesRegex(ValueError, "environment variable RANK expected"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="env://") + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=rpc_backend_options, + ) + + # tcp init + with self.assertRaisesRegex(ValueError, "rank parameter missing"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="tcp://127.0.0.1:23456") + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_dynamic_and_static_init_rpc_together(self): + # Initialize a static rpc group with size = self.world_size - 1 + dist.init_process_group( + backend='gloo', + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size) + + world_size_minus_one = self.world_size - 1 + if self.rank < world_size_minus_one: + rpc.init_rpc( + 
name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=world_size_minus_one, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.barrier() + + # Attempt to add an additional dynamic group member + if self.rank == world_size_minus_one: + # Expect error message to be thrown + with self.assertRaisesRegex(RuntimeError, "RPC group mixes statically and dynamically\ + initialized members which is not supported."): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + +class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon): + + def _test_device_maps(self, options, errMsg): + with self.assertRaisesRegex(ValueError, errMsg): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + self.assertFalse(rpc.api._is_current_rpc_agent_set()) + + @skip_if_lt_x_gpu(2) + def test_device_maps_wrong_worker_name(self): + options = self.rpc_backend_options + options.set_device_map("none_exist", {0: 1}) + + self._test_device_maps( + options, + errMsg="Node worker0 has invalid target node names in its device maps" + ) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_max_local_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {torch.cuda.device_count(): 0}) + + self._test_device_maps( + options, + errMsg="Node worker0 has source devices with invalid indices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_max_remote_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: torch.cuda.device_count()}) + + self._test_device_maps( + options, + errMsg="Node worker0 has target devices with invalid indices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(2) + def test_device_maps_many_to_one(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {1: 0}) + options.set_device_map(dst, {0: 0}) + + self._test_device_maps( + options, + errMsg="Node worker0 has duplicated target devices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(2) + def test_device_maps_one_to_many(self): + if self.rank == 0: + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: 1}) + with self.assertRaisesRegex( + ValueError, "`set_device_map` only supports 1-to-1 mapping" + ): + options.set_device_map(dst, {0: 0}) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_min_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + RuntimeError, "Device index must not be negative" + ): + options.set_device_map(dst, {-1: 0}) + + with self.assertRaisesRegex( + RuntimeError, "Device index must not be negative" + ): + options.set_device_map(dst, {0: -1}) + + @staticmethod + def _gpu_add(x, y): + if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]): + return (x + y).to(0) + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(2) + def test_device_maps_gpu(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: 1, 1: 0}) + + rpc.init_rpc( + 
name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add, + args=(torch.zeros(2).to(0), torch.ones(2).to(0)) + ) + self.assertEqual(ret.device, torch.device(1)) + self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1)) + rpc.shutdown() + + @staticmethod + def _gpu_add_given_devices(x, y, x_to, y_to, z_to): + x_device = "cpu" if x.device.type == "cpu" else x.device.index + y_device = "cpu" if y.device.type == "cpu" else y.device.index + if x_device == x_to and y_device == y_to: + return x.to(z_to) + y.to(z_to) + else: + raise ValueError("Wrong device affinity") + + def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None): + fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn + x_to = device_map[x_from] + y_to = device_map[y_from] + + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst + options.set_device_map(dst, device_map) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(x_from) + y = torch.ones(2).to(y_from) + + ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to)) + + reverse_device_map = {device_map[k] : k for k in device_map} + z_from = reverse_device_map[z_to] + + ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index + self.assertEqual(ret_device, z_from) + self.assertEqual(ret, torch.ones(2).to(z_from)) + + rpc.shutdown() + + def test_device_map_cpu(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to="cpu", + device_map={"cpu" : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(1) + def test_device_map_cpu_to_gpu_default(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to=0, + device_map={"cpu" : 0}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_cpu_to_gpu_non_default(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to=1, + device_map={"cpu" : 1}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(1) + def test_device_map_gpu_to_cpu_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to="cpu", + device_map={0 : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_to_cpu_non_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to="cpu", + device_map={1 : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to=0, + device_map={0 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_non_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to=1, + device_map={1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_default_to_non_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to=1, + device_map={0 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_non_default_to_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to=0, + device_map={1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_1(self): + self._test_device_maps_gpu( 
+ x_from=0, + y_from=1, + z_to=0, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_2(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_3(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_4(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_5(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_6(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_7(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_8(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_1(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_2(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_3(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_4(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_5(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_6(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_7(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_8(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @staticmethod + def _gpu_add_multi_gpu(x, y): + if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]): + return x.to(0) + y, x - y.to(1) + else: + raise ValueError("Wrong device affinity") + + def _test_device_maps_multi_gpu(self, dst): + options = self.rpc_backend_options + options.set_device_map(dst, {0: 1}) + options.set_device_map(dst, {1: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(0) + y = torch.ones(2).to(1) + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu, + args=(x, y) + ) + + self.assertEqual(rets[0].device, torch.device(1)) + self.assertEqual(rets[1].device, 
torch.device(0)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_device_maps_multi_gpu(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._test_device_maps_multi_gpu(dst) + + @skip_if_lt_x_gpu(2) + def test_device_maps_multi_gpu_self(self): + dst = worker_name(self.rank) + self._test_device_maps_multi_gpu(dst) + + @staticmethod + def _gpu_add_return_to_gpu(x, y): + if x.device.type == 'cpu' and y.device.type == 'cpu': + return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3) + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(2) + def test_device_maps_in_options(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc.TensorPipeRpcBackendOptions( + init_method=options.init_method, + num_worker_threads=options.num_worker_threads, + device_maps={dst: {0: 1, 1: 0}}, + _transports=tp_transports() + ) + ) + + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu, + args=(torch.zeros(2).to(0), torch.ones(2).to(1)) + ) + self.assertEqual(rets[0].device, torch.device(1)) + self.assertEqual(rets[1].device, torch.device(0)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + rpc.shutdown() + + def _test_device_maps_return_to_gpu(self, dst): + options = self.rpc_backend_options + + options.set_device_map(dst, {0: 1}) + options.set_device_map(dst, {1: 2}) + options.set_device_map(dst, {2: 3}) + options.set_device_map(dst, {3: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu, + args=(torch.zeros(2), torch.ones(2)) + ) + for i in range(len(rets)): + self.assertEqual(rets[i].device, torch.device((3 + i) % 4)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1)) + self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2)) + rpc.shutdown() + + @skip_if_lt_x_gpu(4) + def test_device_maps_return_to_gpu(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._test_device_maps_return_to_gpu(dst) + + @skip_if_lt_x_gpu(4) + def test_device_maps_return_to_gpu_self(self): + dst = worker_name(self.rank) + self._test_device_maps_return_to_gpu(dst) + + @staticmethod + def _add_to_gpu(x, y): + return (x + y).to(0) + + def _test_device_maps_missing_config(self, mode): + dst = worker_name((self.rank + 1) % self.world_size) + errMsg = ( + "TensorPipe RPC backend only supports CPU tensors by default.*" + "`set_device_map` on `TensorPipeRpcBackendOptions`" + ) + + with self.assertRaisesRegex(RuntimeError, errMsg): + if mode == RPCExecMode.SYNC: + rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1)) + elif mode == RPCExecMode.REMOTE: + rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here() + else: + raise ValueError(f"unexpected mode {mode}") + + # make sure RPC is still functioning + ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1)) + 
self.assertEqual(ret, torch.ones(2) + 1) + + def _test_device_maps_missing_config_response(self, mode): + dst = worker_name((self.rank + 1) % self.world_size) + errMsg = "Response device mapping is not available" + + with self.assertRaisesRegex(RuntimeError, errMsg): + if mode == RPCExecMode.SYNC: + rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ) + elif mode == RPCExecMode.REMOTE: + rpc.remote( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ).to_here() + else: + raise ValueError(f"unexpected mode {mode}") + + # make sure RPC is still functioning + ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1)) + self.assertEqual(ret, torch.ones(2) + 1) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config(self): + self._test_device_maps_missing_config(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + def test_device_maps_missing_config_not_timeout(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options + ) + + timeout = rpc.get_rpc_timeout() + + tik = time.time() + self._test_device_maps_missing_config(RPCExecMode.SYNC) + rpc.shutdown() + tok = time.time() + + self.assertTrue(tok - tik < timeout) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_loop(self): + for _ in range(self.rpc_backend_options.num_worker_threads + 5): + self._test_device_maps_missing_config(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_response(self): + self._test_device_maps_missing_config_response(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_response_loop(self): + for _ in range(self.rpc_backend_options.num_worker_threads + 5): + self._test_device_maps_missing_config_response(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_remote(self): + self._test_device_maps_missing_config(RPCExecMode.REMOTE) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_remote_response(self): + self._test_device_maps_missing_config_response(RPCExecMode.REMOTE) + + @skip_if_lt_x_gpu(2) + def test_device_maps_remote(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {1: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rref = rpc.remote( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ) + + self.assertEqual(rref.to_here().device.index, 1) + self.assertEqual(rref.to_here(), torch.ones(2).to(1)) + + rpc.shutdown() + + @staticmethod + def _slow_add_on_user_stream(x, y): + s0 = torch.cuda.current_stream(x.device) + s1 = torch.cuda.Stream(device=x.device) + s1.wait_stream(s0) + x.record_stream(s1) + y.record_stream(s1) + with torch.cuda.stream(s1): + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + z = x + y + s0.wait_stream(s1) + z.record_stream(s0) + return z + + def _test_custom_stream(self, fn, device_map): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, device_map) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + 
world_size=self.world_size, + rpc_backend_options=options, + ) + + fn(dst) + + rpc.shutdown() + + def _test_stream_sync(self, dst): + x = torch.ones(2, 2).to(0) + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, x) + ) + self.assertEqual(ret, 2 * x) + + @skip_if_lt_x_gpu(2) + def test_custom_stream(self): + self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"}) + + def _test_stream_multi_async(self, dst): + futs = [] + for i in range(20): + x = torch.ones(2, 2).to(0) * i + futs.append( + rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, x) + ) + ) + + for i in range(20): + self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_multi(self): + self._test_custom_stream( + self._test_stream_multi_async, + {"cuda:0": "cuda:1"} + ) + + @staticmethod + def _nested_slow_add_on_user_stream(dst, x, y, z): + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, y) + ) + + return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z) + + def _test_stream_nested_sync(self, dst): + x = torch.ones(2, 2).to(0) + y = torch.ones(2, 2).to(0) * 2 + z = torch.ones(2, 2).to(0) * 3 + nested_dst = worker_name((self.rank + 2) % self.world_size) + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream, + args=(nested_dst, x, y, z) + ) + self.assertEqual(ret, 6 * x) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_nested(self): + self._test_custom_stream( + self._test_stream_nested_sync, + {"cuda:0": "cuda:1", "cuda:1": "cuda:0"} + ) + + def _test_stream_nested_multi_async(self, dst): + if self.rank == 0: + futs = [] + n = 5 + xs, ys, zs = [], [], [] + for i in range(n): + x = torch.ones(2, 2).to(0) * (i - 1) + y = torch.ones(2, 2).to(0) * i + z = torch.ones(2, 2).to(0) * (i + 1) + xs.append(x) + ys.append(y) + zs.append(z) + nested_dst = worker_name((self.rank + 2) % self.world_size) + futs.append( + rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream, + args=(nested_dst, x, y, z) + ) + ) + + for i in range(n): + self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i]) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_nested_multi(self): + self._test_custom_stream( + self._test_stream_nested_multi_async, + {"cuda:0": "cuda:1", "cuda:1": "cuda:0"} + ) + + @staticmethod + def _gpu_add_wrong_gpus(x, y): + if x.is_cuda and y.is_cuda: + return x.cpu() + y.cuda() + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(1) + def test_device_mismatch(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(0) + y = torch.ones(2).to(0) + + with self.assertRaisesRegex( + RuntimeError, + "Expected all tensors to be on the same device, but found at least two devices" + ): + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus, + args=(x, y) + ) + + rpc.shutdown() + + def _test_rref_synchronization(self, local_device, remote_device): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {local_device : remote_device}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + 
rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 1: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. + # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + x = torch.randn(200, 1, 28, 28).to(local_device) + actual = rref.remote().forward(x).to_here() + expected = rref.rpc_sync().forward(x) + self.assertEqual(actual, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_to_here_synchronization1(self): + self._test_rref_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization2(self): + self._test_rref_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization3(self): + self._test_rref_synchronization("cuda:1", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization4(self): + self._test_rref_synchronization("cuda:0", "cuda:1") + + def _test_rref_as_arg_synchronization( + self, + local_device, + remote_device, + devicesOptions=None + ): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {local_device: remote_device}) + + input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size) + options.set_device_map(input_src, {remote_device: local_device}) + + if devicesOptions is not None: + options.set_devices(devicesOptions[self.rank]) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 1: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. 
+ # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device)) + actual = rref.remote().forward(rref_x, True).to_here() + expected = rref.rpc_sync().forward(rref_x, True) + self.assertEqual(actual, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_as_arg_synchronization1(self): + self._test_rref_as_arg_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization2(self): + self._test_rref_as_arg_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization3(self): + self._test_rref_as_arg_synchronization("cuda:1", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization4(self): + self._test_rref_as_arg_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(1) + def test_rref_as_arg_synchronization5(self): + self._test_rref_as_arg_synchronization( + "cuda:0", + "cuda:0", + [["cuda:0"] for _ in range(4)], # devicesOptions + ) + + @staticmethod + def _rref_relay(rref): + return rref.to_here() + + def _test_rref_forward_synchronization(self, local_device, remote_device): + options = self.rpc_backend_options + + input_src = worker_name(0) + model_dst = worker_name(1) + out_relay = worker_name(2) + + if self.rank == 0: + # for 1) model construction 2) forward execution + options.set_device_map(model_dst, {local_device: remote_device}) + + # Forward output will be first copied to the relay node before + # returning to the worker. This is intentional, to test RRef + # forward CUDA stream synchronizations. + options.set_device_map(out_relay, {local_device: local_device}) + elif self.rank == 1: + # worker1 hosts the model and runs forward. The forward functions + # calls RRef.to_here(), hence needs to configure the device map + options.set_device_map(input_src, {remote_device: local_device}) + elif self.rank == 2: + # worker2 will get the out RRef and call to_here() and hence, needs + # to configure device map. + options.set_device_map(model_dst, {local_device: remote_device}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 0: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. 
+ # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device)) + rref_out = rref.remote().forward(rref_input, True) + out = rpc.remote( + out_relay, + TensorPipeAgentCudaRpcTest._rref_relay, + args=(rref_out,) + ).to_here() + expected = rref.rpc_sync().forward(rref_input, True) + self.assertEqual(out, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_forward_synchronization1(self): + self._test_rref_forward_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization2(self): + self._test_rref_forward_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization3(self): + self._test_rref_forward_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization4(self): + self._test_rref_forward_synchronization("cuda:1", "cuda:1") + + def _test_owner_rref_forward_synchronization(self, local_device, remote_device): + if self.rank == 0: + options = self.rpc_backend_options + options.set_device_map("w0", {local_device: remote_device}) + rpc.init_rpc( + "w0", + rank=0, + world_size=1, + rpc_backend_options=options + ) + + model = rpc.remote( + "w0", torch.nn.Linear, (2048, 20000) + ).remote().to(remote_device) + for _ in range(30): + data = torch.rand(2048, 2048).to(local_device) + output = model.rpc_sync().forward(data) + # to_here() internally calls localValue as the caller is + # the owner of the RRef. + v0 = rpc.RRef(output).remote().sum().to_here().item() + v1 = output.sum().item() + self.assertEqual(v0, v1) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_owner_rref_forward_synchronization1(self): + self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization2(self): + self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization3(self): + self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization4(self): + self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1") + + @staticmethod + def _return_tensor_view(i): + x = torch.ones(1000, 200).cuda(0) * i + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + # serialization of the return value will create a new tensor from the + # view, which is done outside of the user function. 
+ return x.split(100)[0] + + @skip_if_lt_x_gpu(1) + def test_tensor_view_as_return_value(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0 : 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + futs = [] + for i in range(5): + futs.append(rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._return_tensor_view, + args=(i,) + )) + + for i in range(5): + self.assertEqual(torch.ones(100, 200) * i, futs[i].wait()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_devices_option_mismatch(self): + with self.assertRaisesRegex( + ValueError, + "Node worker0 has unexpected source devices in its device map for worker1" + ): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0 : 0}) + options.set_devices([1]) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_devices_option_mismatch_reverse(self): + with self.assertRaisesRegex( + ValueError, + "Node worker0 has unexpected target devices in its device map for worker1" + ): + dst = worker_name((self.rank + 1) % self.world_size) + + options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + device_maps={dst: {0 : 1}}, + devices=[0] + ) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_int(self): + fut = Future(devices=[0]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_str(self): + fut = Future(devices=["cuda:0"]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_device(self): + fut = Future(devices=[torch.device("cuda", 0)]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_not_cuda(self): + with self.assertRaisesRegex( + ValueError, "Expected devices to have indices, got cpu" + ): + fut = Future(devices=["cpu"]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_list_with_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=False + ) + + @skip_if_lt_x_gpu(2) + def test_cuda_future_callback_changes_devices(self): + # We check proper CUDA stream synchronization by filling the tensor with + # the expected value in one stream, and reading it from another stream. 
+ tensor0 = torch.zeros((100,), device="cuda:0") + tensor1 = torch.zeros((100,), device="cuda:1") + parent_future = Future(devices=["cuda:0", "cuda:1"]) + + def cb(fut): + t0 = fut.value() + tensor1.copy_(t0, non_blocking=True) + return tensor1 + + child_future = parent_future.then(cb) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor0.fill_(1) + parent_future.set_result(tensor0) + with torch.cuda.device("cuda:1"): + another_stream = torch.cuda.Stream() + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(child_future.wait(), 1).all().item()) + + @skip_if_lt_x_gpu(2) + def test_cuda_future_value_on_bad_device(self): + tensor0 = torch.zeros((100,), device="cuda:0") + tensor1 = torch.zeros((100,), device="cuda:1") + parent_future = Future(devices=["cuda:1"]) + + # As a plus, we test that futures still invoke callbacks even in case of + # error, and that the child futures are successful if those callbacks + # don't access the parent future. + def cb(fut): + with torch.cuda.device("cuda:1"): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor1.fill_(1) + return tensor1 + + child_future = parent_future.then(cb) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor0.fill_(1) + parent_future.set_result(tensor0) + with self.assertRaisesRegex( + ValueError, + r"The result contained tensors residing on device\(s\) cuda:0 " + r"which are not among the expected device\(s\) cuda:1", + ): + parent_future.wait() + with torch.cuda.device("cuda:1"): + another_stream = torch.cuda.Stream() + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(child_future.wait(), 1).all().item()) + + @skip_if_lt_x_gpu(1) + def test_async_execution_with_cuda_future(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + t = torch.zeros((100,), device="cuda:0") + fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,)) + another_stream = torch.cuda.Stream("cuda:0") + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(fut.wait(), 1).all().item()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_async_execution_nested_with_cuda_future(self): + dst = worker_name((self.rank + 1) % self.world_size) + nested_dst = worker_name((self.rank + 2) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + a = torch.ones((100,), device="cuda:0") + b = torch.ones((100,), device="cuda:0") + c = torch.ones((100,), device="cuda:0") + fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c)) + another_stream = torch.cuda.Stream("cuda:0") + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(fut.wait(), 3).all().item()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_modify_tensor_inplace(self): + tensor = torch.zeros((100,), device="cuda:0") + future = Future(devices=["cuda:0"]) + future.set_result(tensor) + # It's weird to modify the 
value of a future once it's complete, but + # technically possible. Currently this is considered undefined behavior + # (in practice the future will ignore the modification and still + # synchronize with the original value). We could one day add logic to + # detect and warn or throw in such cases, but for now we just check that + # this doesn't crash. + tensor.fill_(1) + future.wait() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_replace_tensor(self): + tensor_list = [torch.zeros((100,), device="cuda:0")] + future = Future(devices=["cuda:0"]) + future.set_result(tensor_list) + # It's weird to modify the value of a future once it's complete, but + # technically possible. Currently this is considered undefined behavior + # (in practice the future will ignore the modification and still + # synchronize with the original value). We could one day add logic to + # detect and warn or throw in such cases, but for now we just check that + # this doesn't crash. + # We set things up so that the original tensor contained in the list + # gets deleted once we replace it with the other one. This will + # invalidate any cached information held by the future. + tensor_list[0] = torch.ones((100,), device="cuda:0") + future.wait() + + @skip_if_lt_x_gpu(1) + def test_rref_with_unpickleable_attributes(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),)) + rref.rpc_sync().increase(1) + ret = rref.rpc_sync().sum() + self.assertEqual(ret, 42) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=True + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..191017caad139e6be924d5a5d2157838792ff1dd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py @@ -0,0 +1,32 @@ +import torch.distributed.rpc as rpc +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_distributed import ( + tp_transports, +) + + +class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture): + @property + def rpc_backend(self): + return rpc.backend_registry.BackendType[ + "TENSORPIPE" + ] + + @property + def rpc_backend_options(self): + return rpc.backend_registry.construct_rpc_backend_options( + self.rpc_backend, + init_method=self.init_method, + _transports=tp_transports() + ) + + def get_shutdown_error_regex(self): 
+ # FIXME Once we consolidate the error messages returned by the + # TensorPipe agent put some more specific regex here. + error_regexes = [".*"] + return "|".join([f"({error_str})" for error_str in error_regexes]) + + def get_timeout_error_regex(self): + return "RPC ran for more than" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..76ecfc2a6fe90d880513f4df401e9819938a0331 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 +import os +import sys +import unittest +from typing import Dict, List, Type + +from torch.testing._internal.common_distributed import MultiProcessTestCase +from torch.testing._internal.common_utils import ( + TEST_WITH_DEV_DBG_ASAN, + find_free_port, + IS_SANDCASTLE, +) +from torch.testing._internal.distributed.ddp_under_dist_autograd_test import ( + CudaDdpComparisonTest, + DdpComparisonTest, + DdpUnderDistAutogradTest, +) +from torch.testing._internal.distributed.pipe_with_ddp_test import ( + PipeWithDDPTest, +) +from torch.testing._internal.distributed.nn.api.remote_module_test import ( + CudaRemoteModuleTest, + RemoteModuleTest, + ThreeWorkersRemoteModuleTest, +) +from torch.testing._internal.distributed.rpc.dist_autograd_test import ( + DistAutogradTest, + CudaDistAutogradTest, + FaultyAgentDistAutogradTest, + TensorPipeAgentDistAutogradTest, + TensorPipeCudaDistAutogradTest +) +from torch.testing._internal.distributed.rpc.dist_optimizer_test import ( + DistOptimizerTest, +) +from torch.testing._internal.distributed.rpc.jit.dist_autograd_test import ( + JitDistAutogradTest, +) +from torch.testing._internal.distributed.rpc.jit.rpc_test import JitRpcTest +from torch.testing._internal.distributed.rpc.jit.rpc_test_faulty import ( + JitFaultyAgentRpcTest, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.distributed.rpc.faulty_agent_rpc_test import ( + FaultyAgentRpcTest, +) +from torch.testing._internal.distributed.rpc.rpc_test import ( + CudaRpcTest, + RpcTest, + TensorPipeAgentRpcTest, + TensorPipeAgentCudaRpcTest, +) +from torch.testing._internal.distributed.rpc.examples.parameter_server_test import ParameterServerTest +from torch.testing._internal.distributed.rpc.examples.reinforcement_learning_rpc_test import ( + ReinforcementLearningRpcTest, +) + + +def _check_and_set_tcp_init(): + # if we are running with TCP init, set main address and port + # before spawning subprocesses, since different processes could find + # different ports. + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + os.environ["MASTER_ADDR"] = '127.0.0.1' + os.environ["MASTER_PORT"] = str(find_free_port()) + +def _check_and_unset_tcp_init(): + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + del os.environ["MASTER_ADDR"] + del os.environ["MASTER_PORT"] + +# The tests for the RPC module need to cover multiple possible combinations: +# - different aspects of the API, each one having its own suite of tests; +# - different agents (ProcessGroup, TensorPipe, ...); +# To avoid a combinatorial explosion in code size, and to prevent forgetting to +# add a combination, these are generated automatically by the code in this file. 
+# Here, we collect all the test suites that we need to cover. +# We then have one separate file for each agent, from which +# we call the generate_tests function of this file, passing to it a fixture for +# the agent, which then gets mixed-in with each test suite. + +@unittest.skipIf( + TEST_WITH_DEV_DBG_ASAN, "Skip ASAN as torch + multiprocessing spawn have known issues" +) +class SpawnHelper(MultiProcessTestCase): + def setUp(self): + super().setUp() + _check_and_set_tcp_init() + self._spawn_processes() + + def tearDown(self): + _check_and_unset_tcp_init() + super().tearDown() + + +# This list contains test suites that are agent-agnostic and that only verify +# compliance with the generic RPC interface specification. These tests should +# *not* make use of implementation details of a specific agent (options, +# attributes, ...). These test suites will be instantiated multiple times, once +# for each agent (except the faulty agent, which is special). +GENERIC_TESTS = [ + RpcTest, + ParameterServerTest, + DistAutogradTest, + DistOptimizerTest, + JitRpcTest, + JitDistAutogradTest, + RemoteModuleTest, + ThreeWorkersRemoteModuleTest, + DdpUnderDistAutogradTest, + DdpComparisonTest, + ReinforcementLearningRpcTest, +] +GENERIC_CUDA_TESTS = [ + CudaRpcTest, + CudaDistAutogradTest, + CudaRemoteModuleTest, + CudaDdpComparisonTest, + PipeWithDDPTest, +] + + +# This list contains test suites that will only be run on the TensorPipeAgent. +# These suites should be standalone, and separate from the ones in the generic +# list (not subclasses of those!). +TENSORPIPE_TESTS = [ + TensorPipeAgentRpcTest, + TensorPipeAgentDistAutogradTest, +] +TENSORPIPE_CUDA_TESTS = [ + TensorPipeAgentCudaRpcTest, + TensorPipeCudaDistAutogradTest, +] + + +# This list contains test suites that will only be run on the faulty RPC agent. +# That agent is special as it's only used to perform fault injection in order to +# verify the error handling behavior. Thus the faulty agent will only run the +# suites in this list, which were designed to test such behaviors, and not the +# ones in the generic list. +FAULTY_AGENT_TESTS = [ + FaultyAgentRpcTest, + FaultyAgentDistAutogradTest, + JitFaultyAgentRpcTest, +] + + +def generate_tests( + prefix: str, + mixin: Type[RpcAgentTestFixture], + tests: List[Type[RpcAgentTestFixture]], + module_name: str, +) -> Dict[str, Type[RpcAgentTestFixture]]: + """Mix in the classes needed to autogenerate the tests based on the params. + + Takes a series of test suites, each written against a "generic" agent (i.e., + derived from the abstract RpcAgentTestFixture class), as the `tests` args. + Takes a concrete subclass of RpcAgentTestFixture, which specializes it for a + certain agent, as the `mixin` arg. Produces all combinations of them. + Returns a dictionary of class names to class type + objects which can be inserted into the global namespace of the calling + module. The name of each test will be a concatenation of the `prefix` arg + and the original name of the test suite. + The `module_name` should be the name of the calling module so + that the classes can be fixed to make it look like they belong to it, which + is necessary for pickling to work on them. 
+ """ + ret: Dict[str, Type[RpcAgentTestFixture]] = {} + for test_class in tests: + if IS_SANDCASTLE and TEST_WITH_DEV_DBG_ASAN: + print( + f'Skipping test {test_class} on sandcastle for the following reason: ' + 'Skip dev-asan as torch + multiprocessing spawn have known issues', file=sys.stderr) + continue + + name = f"{prefix}{test_class.__name__}" + class_ = type(name, (test_class, mixin, SpawnHelper), {}) + class_.__module__ = module_name + ret[name] = class_ + return ret diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..4c53517ae49bde15c9b1e20cf71785d78bf39354 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py @@ -0,0 +1,182 @@ +import torch +from torch.utils._pytree import tree_map +from typing import Iterator, List, Optional +import logging +import contextlib +import itertools +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils.weak import WeakTensorKeyDictionary +import functools +from torch._C._profiler import gather_traceback, symbolize_tracebacks + + +_dtype_abbrs = { + torch.bfloat16: "bf16", + torch.float64: "f64", + torch.float32: "f32", + torch.float16: "f16", + torch.complex32: "c32", + torch.complex64: "c64", + torch.complex128: "c128", + torch.int8: "i8", + torch.int16: "i16", + torch.int32: "i32", + torch.int64: "i64", + torch.bool: "b8", + torch.uint8: "u8", +} + +# How the chain of calls works for LoggingTensor: +# 1. Call torch.sin +# 2. Attempt __torch_function__. In LoggingTensor torch function is disabled so we bypass it entirely +# 3. Enter dispatcher, wind your way through Autograd +# 4. Hit Python dispatch key, call __torch_dispatch__ + +# This Tensor can work with autograd in two ways: +# - The wrapped Tensor does not require gradients. In that case, the LoggingTensor +# can require gradients if the user asks for it as a constructor kwarg. +# - The wrapped Tensor can require gradients. In that case autograd will be tracked +# for the wrapped Tensor and the LoggingTensor itself cannot require gradients. +# WARNING: We allow these two possibilities for testing purposes. You should NEVER use both in a single +# test or you might get surprising behavior. + +# TODO: TensorBase should work +class LoggingTensor(torch.Tensor): + elem: torch.Tensor + + __slots__ = ['elem'] + + context = contextlib.nullcontext + + __torch_function__ = torch._C._disabled_torch_function_impl + + @staticmethod + def __new__(cls, elem, *args, **kwargs): + # The wrapping tensor (LoggingTensor) shouldn't hold any + # memory for the class in question, but it should still + # advertise the same device as before + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, elem.size(), + strides=elem.stride(), storage_offset=elem.storage_offset(), + # TODO: clone storage aliasing + dtype=elem.dtype, layout=elem.layout, + device=elem.device, requires_grad=kwargs.get("requires_grad", False) + ) + # ...the real tensor is held as an element on the tensor. 
+ r.elem = elem.detach() if r.requires_grad else elem + return r + + def __repr__(self): + return super().__repr__(tensor_contents=f"{self.elem}") + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + def unwrap(e): + return e.elem if isinstance(e, cls) else e + + def wrap(e): + return cls(e) if isinstance(e, torch.Tensor) else e + + with cls.context(): + rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))) + logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs) + return rs + +class LoggingTensorMode(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + rs = func(*args, **kwargs) + logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs) + return rs + +class LoggingTensorReentrant(LoggingTensor): + context = torch.overrides.enable_reentrant_dispatch + +# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list +class LoggingTensorHandler(logging.Handler): + def __init__( + self, log_list: List[str], use_shortid_for_all_tensors: bool, + with_type: bool, tracebacks_list: Optional[List]) -> None: + logging.Handler.__init__(self) + self.log_list = log_list + self.use_shortid_for_all_tensors = use_shortid_for_all_tensors + self.tracebacks_list = tracebacks_list + self.memo = WeakTensorKeyDictionary() + self.next_id = 0 + self.with_type = with_type + + def _shortid(self, t: torch.Tensor) -> int: + if t not in self.memo: + self.memo[t] = self.next_id + self.next_id += 1 + return self.memo[t] + + def _fmt(self, a: object, with_type: bool = False) -> str: + cond_cls = torch.Tensor if self.use_shortid_for_all_tensors else LoggingTensor + if isinstance(a, cond_cls): + maybe_type = "" + if with_type and self.with_type: + maybe_type = f": {_dtype_abbrs[a.dtype]}[{', '.join(map(str, a.shape))}]" + x = f"${self._shortid(a)}{maybe_type}" + return x + else: + return repr(a) + + def emit(self, record): + fmt_args = ", ".join( + itertools.chain( + (str(tree_map(self._fmt, a)) for a in record.args[0]), + (f"{k}={str(tree_map(self._fmt, v))}" for k, v in record.args[1].items()), + ) + ) + fmt_rets = tree_map(functools.partial(self._fmt, with_type=True), record.args[2]) + self.log_list.append(f'{fmt_rets} = {record.msg}({fmt_args})') + if self.tracebacks_list is not None: + self.tracebacks_list.append(record.traceback) + +def log_input(name: str, var: object): + logging.getLogger("LoggingTensor").info("input", (name,), {}, var) + +class GatherTraceback(logging.Filter): + def __init__(self, python=True, script=True, cpp=False): + self.python = python + self.script = script + self.cpp = cpp + + def filter(self, record): + record.traceback = gather_traceback(python=self.python, script=self.script, cpp=self.cpp) + return True + +@contextlib.contextmanager +def capture_logs(is_mode=False, python_tb=False, script_tb=False, cpp_tb=False) -> Iterator[List[str]]: + collect_traceback = python_tb or script_tb or cpp_tb + logger = logging.getLogger("LoggingTensor") + log_list: List[str] = [] + tracebacks_list: List[str] = [] + handler = LoggingTensorHandler( + log_list, + with_type=True, + use_shortid_for_all_tensors=is_mode, + tracebacks_list=tracebacks_list if collect_traceback else None + ) + logger.addHandler(handler) + logger.setLevel(logging.INFO) + logger.propagate = False + if collect_traceback: + logger.addFilter(GatherTraceback(python=python_tb, script=script_tb, cpp=cpp_tb)) + 
try: + if collect_traceback: + yield log_list, tracebacks_list + else: + yield log_list + finally: + symbolized_tracebacks = symbolize_tracebacks(tracebacks_list) + tracebacks_list.clear() + tracebacks_list.extend(symbolized_tracebacks) + logger.removeHandler(handler) + +@contextlib.contextmanager +def capture_logs_with_logging_tensor_mode(python_tb=False, script_tb=False, cpp_tb=False): + with LoggingTensorMode(), capture_logs(True, python_tb, script_tb, cpp_tb) as logs: + yield logs diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/aot_autograd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/aot_autograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86e5dbae3cc148e5672bab02d5bdf7edcf638032 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/aot_autograd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/autograd_registration.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/autograd_registration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e85d53ff7e10d95e8665b77d09618fed9eb55b24 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/autograd_registration.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/fake_tensor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/fake_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe36df59c8b30ee223fd4480f2eb140680482d18 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/fake_tensor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/make_fx.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/make_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..640e847fb13a27d8912f82aff97e2b9c96d96e87 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/make_fx.cpython-310.pyc differ
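
The TensorPipe device-map tests above all revolve around one configuration pattern: the caller registers a per-destination device map on its backend options, and the agent uses the inverse of that map to place the response. Below is a minimal two-process sketch of that pattern, distilled from the tests rather than copied from them; the worker names, the rendezvous port, and the use of `torch.multiprocessing.spawn` are illustrative choices, and at least two CUDA devices are assumed.

```python
import os

import torch
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp


def run_worker(rank, world_size):
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29500"  # placeholder port

    options = rpc.TensorPipeRpcBackendOptions()
    if rank == 0:
        # Tensors sent from worker0's cuda:0 arrive on worker1's cuda:1.
        options.set_device_map("worker1", {"cuda:0": "cuda:1"})

    rpc.init_rpc(
        name=f"worker{rank}",
        rank=rank,
        world_size=world_size,
        rpc_backend_options=options,
    )

    if rank == 0:
        x = torch.ones(2, device="cuda:0")
        # torch.add runs on worker1 with x placed on its cuda:1; the result is
        # mapped back through the inverse of the device map, so it should land
        # on worker0's cuda:0.
        ret = rpc.rpc_sync("worker1", torch.add, args=(x, 1))
        assert ret.device == torch.device("cuda:0")

    rpc.shutdown()


if __name__ == "__main__":
    mp.spawn(run_worker, args=(2,), nprocs=2, join=True)
```

Only the caller configures a map here; as in `_test_device_maps_gpu`, the callee needs no matching entry for the round trip to work, since the reverse mapping is derived from the request.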
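
The `test_cuda_future_*` cases above exercise `Future(devices=...)`, which makes the future track completion with CUDA events instead of forcing a host-side synchronization when a result is set or consumed. A minimal sketch of that pattern, assuming a single CUDA device; the doubling callback is arbitrary and only illustrates that the callback sees a stream-synchronized value.

```python
import torch
from torch.futures import Future

# A future that may hold tensors on cuda:0; completion is tracked with CUDA
# events rather than a blocking host-side sync.
fut = Future(devices=["cuda:0"])


def double_it(f):
    # Called once `fut` completes; by then the value's CUDA work is visible
    # to the stream the callback runs on.
    return f.value() * 2


child = fut.then(double_it)

# Produce the result on a side stream, as the tests above do.
stream = torch.cuda.Stream(device="cuda:0")
with torch.cuda.stream(stream):
    t = torch.ones(100, device="cuda:0")
    fut.set_result(t)

print(child.wait().sum().item())  # expected: 200.0
```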
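
`generate_tests` in the new `rpc_utils.py` is intended to be called from one small module per agent, as its docstring describes. The sketch below shows what such a caller might look like; the module itself (for example a `test_tensorpipe_agent.py`) is hypothetical, but the argument order follows the docstring and the imports match the files added in this diff.

```python
# Hypothetical per-agent test module.
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
    TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
    GENERIC_TESTS,
    TENSORPIPE_TESTS,
    generate_tests,
)

# Inject one concrete, spawn-based TestCase per suite (e.g. "TensorPipeRpcTest")
# into this module's namespace so the test runner can discover them.
globals().update(
    generate_tests(
        "TensorPipe",
        TensorPipeRpcAgentTestFixture,
        GENERIC_TESTS + TENSORPIPE_TESTS,
        __name__,
    )
)

if __name__ == "__main__":
    run_tests()
```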
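
The helpers added in `logging_tensor.py` can be exercised directly, either through the `LoggingTensor` subclass or through `LoggingTensorMode`. A minimal sketch, assuming the default logging configuration; the exact formatting of each log entry (short ids, dtype abbreviations) comes from `LoggingTensorHandler` and is not reproduced here.

```python
import torch
from torch.testing._internal.logging_tensor import (
    LoggingTensor,
    capture_logs,
    capture_logs_with_logging_tensor_mode,
)

# Subclass-based logging: ops dispatched on a LoggingTensor are recorded.
with capture_logs() as logs:
    x = LoggingTensor(torch.ones(2, 2))
    y = x * x
    y.sum()
print("\n".join(logs))  # one formatted entry per dispatched aten op

# Mode-based logging: every op in the region is recorded, even on plain tensors.
with capture_logs_with_logging_tensor_mode() as mode_logs:
    torch.ones(2, 2) + torch.ones(2, 2)
print("\n".join(mode_logs))
```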