python_code | repo_name | file_path |
---|---|---|
import importlib
import numbers
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn import functional as F
from apex._autocast_utils import _cast_if_autocast_enabled
global fused_layer_norm_cuda
fused_layer_norm_cuda = None
# Reference implementation from Huggingface
def manual_rms_norm(input, normalized_shape, weight, eps):
# layer norm should always be calculated in float32
dims = tuple(i for i in range(-1, -len(normalized_shape)-1, -1))
variance = input.to(torch.float32).pow(2).mean(dims, keepdim=True)
input = input * torch.rsqrt(variance + eps)
if weight is None:
return input
# convert into half-precision if necessary
if weight.dtype in [torch.float16, torch.bfloat16]:
input = input.to(weight.dtype)
return weight * input
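# Illustrative sketch of the reference path above (not part of the module API; the
# tensor shapes below are made up for the example):
#
#   x = torch.randn(2, 4, 8, dtype=torch.float16)
#   w = torch.ones(8, dtype=torch.float16)
#   y = manual_rms_norm(x, (8,), w, eps=1e-5)  # same shape as x, dtype follows `w`
#
# The statistics are accumulated in float32 and the result is cast back to half
# precision only because `w` is half precision, as the comments above describe.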
class FusedLayerNormAffineFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias, normalized_shape, eps):
global fused_layer_norm_cuda
if fused_layer_norm_cuda is None:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
weight_ = weight.contiguous()
bias_ = bias.contiguous()
output, mean, invvar = fused_layer_norm_cuda.forward_affine(
input_, ctx.normalized_shape, weight_, bias_, ctx.eps
)
ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
return output
@staticmethod
def backward(ctx, grad_output):
input_, weight_, bias_, mean, invvar = ctx.saved_tensors
grad_input = grad_weight = grad_bias = None
grad_input, grad_weight, grad_bias = fused_layer_norm_cuda.backward_affine(
grad_output.contiguous(), mean, invvar, input_, ctx.normalized_shape, weight_, bias_, ctx.eps
)
return grad_input, grad_weight, grad_bias, None, None
class FusedRMSNormAffineFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, normalized_shape, eps):
global fused_layer_norm_cuda
if fused_layer_norm_cuda is None:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
weight_ = weight.contiguous()
output, invvar = fused_layer_norm_cuda.rms_forward_affine(
input_, ctx.normalized_shape, weight_, ctx.eps)
ctx.save_for_backward(input_, weight_, invvar)
return output
@staticmethod
def backward(ctx, grad_output):
input_, weight_, invvar = ctx.saved_tensors
grad_input = grad_weight = None
grad_input, grad_weight = fused_layer_norm_cuda.rms_backward_affine(
grad_output.contiguous(), invvar, input_, ctx.normalized_shape, weight_, ctx.eps
)
return grad_input, grad_weight, None, None
class FusedLayerNormAffineMixedDtypesFunction(FusedLayerNormAffineFunction):
@staticmethod
def forward(ctx, input, weight, bias, normalized_shape, eps):
global fused_layer_norm_cuda
if fused_layer_norm_cuda is None:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
weight_ = weight.contiguous()
bias_ = bias.contiguous()
output, mean, invvar = fused_layer_norm_cuda.forward_affine_mixed_dtypes(
input_, ctx.normalized_shape, weight_, bias_, ctx.eps
)
ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
return output
class FusedRMSNormAffineMixedDtypesFunction(FusedRMSNormAffineFunction):
@staticmethod
def forward(ctx, input, weight, normalized_shape, eps):
global fused_layer_norm_cuda
if fused_layer_norm_cuda is None:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
weight_ = weight.contiguous()
output, invvar = fused_layer_norm_cuda.rms_forward_affine_mixed_dtypes(
input_, ctx.normalized_shape, weight_, ctx.eps
)
ctx.save_for_backward(input_, weight_, invvar)
return output
class FusedLayerNormFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, normalized_shape, eps):
global fused_layer_norm_cuda
if fused_layer_norm_cuda is None:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
output, mean, invvar = fused_layer_norm_cuda.forward(input_, ctx.normalized_shape, ctx.eps)
ctx.save_for_backward(input_, mean, invvar)
return output
@staticmethod
def backward(ctx, grad_output):
input_, mean, invvar = ctx.saved_tensors
grad_input = None
grad_input = fused_layer_norm_cuda.backward(
grad_output.contiguous(), mean, invvar, input_, ctx.normalized_shape, ctx.eps
)
return grad_input, None, None
class FusedRMSNormFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, normalized_shape, eps):
global fused_layer_norm_cuda
if fused_layer_norm_cuda is None:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
output, invvar = fused_layer_norm_cuda.rms_forward(input_, ctx.normalized_shape, ctx.eps)
ctx.save_for_backward(input_, invvar)
return output
@staticmethod
def backward(ctx, grad_output):
input_, invvar = ctx.saved_tensors
grad_input = None
grad_input = fused_layer_norm_cuda.rms_backward(
grad_output.contiguous(), invvar, input_, ctx.normalized_shape, ctx.eps
)
return grad_input, None, None
def fused_layer_norm_affine(input, weight, bias, normalized_shape, eps=1e-6):
args = _cast_if_autocast_enabled(input, weight, bias, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedLayerNormAffineFunction.apply(*args)
def fused_layer_norm(input, normalized_shape, eps=1e-6):
args = _cast_if_autocast_enabled(input, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedLayerNormFunction.apply(*args)
def mixed_dtype_fused_layer_norm_affine(input, weight, bias, normalized_shape, eps=1e-6):
args = _cast_if_autocast_enabled(input, weight, bias, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedLayerNormAffineMixedDtypesFunction.apply(*args)
def fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):
args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedRMSNormAffineFunction.apply(*args)
def fused_rms_norm(input, normalized_shape, eps=1e-6):
args = _cast_if_autocast_enabled(input, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedRMSNormFunction.apply(*args)
def mixed_dtype_fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):
args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedRMSNormAffineMixedDtypesFunction.apply(*args)
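# Descriptive note on the wrappers above (not additional API): each helper first runs
# `_cast_if_autocast_enabled` so its tensor arguments are cast to the active autocast
# dtype, then disables autocast before invoking the custom autograd Function, so the
# fused CUDA kernels see a single, already-decided dtype. A hypothetical call such as
#
#   with torch.cuda.amp.autocast():
#       y = fused_rms_norm_affine(x, weight, (hidden_size,), eps=1e-5)
#
# is therefore equivalent to casting `x` and `weight` up front and calling the helper
# outside of autocast (`hidden_size` here is just a placeholder).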
class FusedLayerNorm(torch.nn.Module):
r"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
Currently only runs on cuda() tensors.
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated separately over the last
certain number of dimensions which have to be of the shape specified by
:attr:`normalized_shape`.
:math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
.. note::
Unlike Batch Normalization and Instance Normalization, which apply
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, Layer Normalization applies per-element scale and
bias with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \times \text{normalized}\_\text{shape}[0] \times \text{normalized}\_\text{shape}[1]
\times \ldots \times \text{normalized}\_\text{shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 5, 10, 10)
>>> # With Learnable Parameters
>>> m = apex.normalization.FusedLayerNorm(input.size()[1:])
>>> # Without Learnable Parameters
>>> m = apex.normalization.FusedLayerNorm(input.size()[1:], elementwise_affine=False)
>>> # Normalize over last two dimensions
>>> m = apex.normalization.FusedLayerNorm([10, 10])
>>> # Normalize over last dimension of size 10
>>> m = apex.normalization.FusedLayerNorm(10)
>>> # Activating the module
>>> output = m(input)
.. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
"""
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
super().__init__()
global fused_layer_norm_cuda
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.empty(*normalized_shape))
self.bias = Parameter(torch.empty(*normalized_shape))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
if torch.jit.is_tracing() or torch.jit.is_scripting() or not input.is_cuda:
return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
if self.elementwise_affine:
return fused_layer_norm_affine(input, self.weight, self.bias, self.normalized_shape, self.eps)
else:
return fused_layer_norm(input, self.normalized_shape, self.eps)
def extra_repr(self):
return "{normalized_shape}, eps={eps}, " "elementwise_affine={elementwise_affine}".format(**self.__dict__)
class FusedRMSNorm(torch.nn.Module):
r"""Applies RMS Normalization over a mini-batch of inputs
Currently only runs on cuda() tensors.
.. math::
y = \frac{x}{\mathrm{RMS}[x]} * \gamma
The root-mean-square is calculated separately over the last
certain number of dimensions which have to be of the shape specified by
:attr:`normalized_shape`.
:math:`\gamma` is a learnable affine transform parameter of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
`epsilon` is added to the mean square, and then the square root of that sum is taken.
.. note::
Unlike Batch Normalization and Instance Normalization, which apply
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, RMS Normalization applies per-element scale
with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \times \text{normalized}\_\text{shape}[0] \times \text{normalized}\_\text{shape}[1]
\times \ldots \times \text{normalized}\_\text{shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters (weights) initialized to ones.
Default: ``True``.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 5, 10, 10)
>>> # With Learnable Parameters
>>> m = apex.normalization.FusedRMSNorm(input.size()[1:])
>>> # Without Learnable Parameters
>>> m = apex.normalization.FusedRMSNorm(input.size()[1:], elementwise_affine=False)
>>> # Normalize over last two dimensions
>>> m = apex.normalization.FusedRMSNorm([10, 10])
>>> # Normalize over last dimension of size 10
>>> m = apex.normalization.FusedRMSNorm(10)
>>> # Activating the module
>>> output = m(input)
.. _`Root Mean Square Layer Normalization`: https://arxiv.org/pdf/1910.07467.pdf
"""
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
super().__init__()
global fused_layer_norm_cuda
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.empty(*normalized_shape))
else:
self.register_parameter("weight", None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
def forward(self, input):
if torch.jit.is_tracing() or torch.jit.is_scripting() or not input.is_cuda:
return manual_rms_norm(input, self.normalized_shape, self.weight, self.eps)
if self.elementwise_affine:
return fused_rms_norm_affine(input, self.weight, self.normalized_shape, self.eps)
else:
return fused_rms_norm(input, self.normalized_shape, self.eps)
def extra_repr(self):
return "{normalized_shape}, eps={eps}, " "elementwise_affine={elementwise_affine}".format(**self.__dict__)
# NOTE (mkozuki): Why "mixed"?
# MixedFusedLayerNorm differs from FusedLayerNorm in that this layer norm uses parameter's dtype
# as output tensor's dtype while FusedLayerNorm uses input tensor's dtype for output tensor's dtype.
# See: `layer_norm_affine` and `layer_norm_affine_mixed_dtypes` in "csrc/layer_norm_cuda.cpp"
class MixedFusedLayerNorm(FusedLayerNorm):
def __init__(self, normalized_shape, eps=1e-5, **kwargs):
if "elementwise_affine" in kwargs:
import warnings
warnings.warn("MixedFusedLayerNorm does not support `elementwise_affine` argument")
elementwise_affine = kwargs.pop("elementwise_affine")
if not elementwise_affine:
raise RuntimeError("MixedFusedLayerNorm does not support `elementwise_affine = False`")
super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=True)
def forward(self, input: torch.Tensor):
# NOTE (mkozuki): CPU path is here mainly for unittest sake.
if torch.jit.is_tracing() or torch.jit.is_scripting() or not input.is_cuda:
return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
return mixed_dtype_fused_layer_norm_affine(input, self.weight, self.bias, self.normalized_shape, self.eps)
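# Illustrative example of the dtype behaviour described in the note above (assuming
# float32 parameters and float16 activations; sizes are arbitrary):
#
#   ln = MixedFusedLayerNorm(1024).cuda()        # weight/bias stay torch.float32
#   y = ln(torch.randn(8, 1024, device="cuda", dtype=torch.float16))
#
# Per the note, the mixed-dtype kernel takes the parameter dtype for its output, so
# `y` would be float32 here, whereas FusedLayerNorm would return float16.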
# MixedFusedLayerNorm differs from FusedLayerNorm in that this layer norm uses parameter's dtype
# as output tensor's dtype while FusedLayerNorm uses input tensor's dtype for output tensor's dtype.
# See: `layer_norm_affine` and `layer_norm_affine_mixed_dtypes` in "csrc/layer_norm_cuda.cpp"
class MixedFusedRMSNorm(FusedRMSNorm):
def __init__(self, normalized_shape, eps=1e-5, **kwargs):
if "elementwise_affine" in kwargs:
import warnings
warnings.warn("MixedFusedRMSNorm does not support `elementwise_affine` argument")
elementwise_affine = kwargs.pop("elementwise_affine")
if not elementwise_affine:
raise RuntimeError("MixedFusedRMSNorm does not support `elementwise_affine = False`")
super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=True)
def forward(self, input: torch.Tensor):
# NOTE (mkozuki): CPU path is here mainly for unittest sake.
# TODO Manual RMS Norm Implementation Here
if torch.jit.is_tracing() or torch.jit.is_scripting() or not input.is_cuda:
return manual_rms_norm(input, self.normalized_shape, self.weight, self.eps)
return mixed_dtype_fused_rms_norm_affine(input, self.weight, self.normalized_shape, self.eps)
| GeneSplice-main | GeneSplice/apex/apex/normalization/fused_layer_norm.py |
from .fused_layer_norm import FusedLayerNorm, MixedFusedLayerNorm, FusedRMSNorm, MixedFusedRMSNorm
| GeneSplice-main | GeneSplice/apex/apex/normalization/__init__.py |
from .fused_dense import *
| GeneSplice-main | GeneSplice/apex/apex/fused_dense/__init__.py |
import torch
from torch import nn
import fused_dense_cuda
from apex._autocast_utils import _cast_if_autocast_enabled
# Implements fused GEMM+bias in the forward pass using fused_dense_cuda from apex
class FusedDenseFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias):
ctx.save_for_backward(input, weight)
output = fused_dense_cuda.linear_bias_forward(input, weight, bias)
return output
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
grad_input, grad_weight, grad_bias = fused_dense_cuda.linear_bias_backward(input, weight, grad_output)
return grad_input, grad_weight, grad_bias
class DenseNoBiasFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight):
ctx.save_for_backward(input, weight)
output = torch.matmul(input, weight.t())
return output
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
grad_input = grad_output.mm(weight)
grad_weight = grad_output.t().mm(input)
return grad_input, grad_weight
class FusedDenseGeluDenseFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight1, bias1, weight2, bias2):
output1, output2, gelu_in = fused_dense_cuda.linear_gelu_linear_forward(input, weight1, bias1, weight2, bias2)
# Only the most recent save_for_backward call is kept, so save everything the backward pass needs in one call.
ctx.save_for_backward(input, weight1, weight2, gelu_in, output1)
return output2
@staticmethod
def backward(ctx, grad_output):
input, weight1, weight2, gelu_in, output1 = ctx.saved_tensors
grad_input, grad_weight1, grad_bias1, grad_weight2, grad_bias2 = fused_dense_cuda.linear_gelu_linear_backward(input, gelu_in, output1, weight1, weight2, grad_output)
return grad_input, grad_weight1, grad_bias1, grad_weight2, grad_bias2
def _fused_dense(input, weight, bias):
args = _cast_if_autocast_enabled(input, weight, bias)
with torch.cuda.amp.autocast(enabled=False):
return FusedDenseFunc.apply(*args)
def _dense_no_bias(input, weight):
args = _cast_if_autocast_enabled(input, weight)
with torch.cuda.amp.autocast(enabled=False):
return DenseNoBiasFunc.apply(*args)
def _fused_dense_gelu_dense(input, weight1, bias1, weight2, bias2):
args = _cast_if_autocast_enabled(input, weight1, bias1, weight2, bias2)
with torch.cuda.amp.autocast(enabled=False):
return FusedDenseGeluDenseFunc.apply(*args)
class FusedDense(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(FusedDense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.empty(out_features, in_features))
if bias:
self.bias = nn.Parameter(torch.empty(out_features))
else:
#assert False, "no-bias option not added yet"
self.register_parameter('bias', None)
def forward(self, input):
if self.bias is not None:
return _fused_dense(input, self.weight, self.bias)
else:
return _dense_no_bias(input, self.weight)
class FusedDenseGeluDense(nn.Module):
def __init__(self, in_features, intermediate_features, out_features, bias=True):
super(FusedDenseGeluDense, self).__init__()
assert bias == True, "DenseGeluDense module without bias is currently not supported"
self.in_features = in_features
self.intermediate_features = intermediate_features
self.out_features = out_features
self.weight1 = nn.Parameter(torch.empty(intermediate_features, in_features))
self.bias1 = nn.Parameter(torch.empty(intermediate_features))
self.weight2 = nn.Parameter(torch.empty(out_features, intermediate_features))
self.bias2 = nn.Parameter(torch.empty(out_features))
def forward(self, input):
return _fused_dense_gelu_dense(input, self.weight1, self.bias1, self.weight2, self.bias2)
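# Usage sketch (illustrative, not part of this file): both modules allocate their
# parameters with torch.empty and define no reset_parameters, so callers are expected
# to initialize the weights themselves before use, e.g.
#
#   dense = FusedDense(1024, 4096, bias=True).cuda().half()
#   torch.nn.init.xavier_uniform_(dense.weight)
#   torch.nn.init.zeros_(dense.bias)
#   out = dense(torch.randn(32, 1024, device="cuda", dtype=torch.float16))
#
# The feature sizes and init scheme above are arbitrary choices for the example.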
| GeneSplice-main | GeneSplice/apex/apex/fused_dense/fused_dense.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class LayerType(enum.Enum):
encoder = 1
decoder = 2
class AttnType(enum.Enum):
self_attn = 1
cross_attn = 2
class AttnMaskType(enum.Enum):
padding = 1
causal = 2
class ModelType(enum.Enum):
encoder_or_decoder = 1
encoder_and_decoder = 2
| GeneSplice-main | GeneSplice/apex/apex/transformer/enums.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO (mkozuki): Replace assert with RuntimeError.
# TODO (mkozuki): Sort the functions in the same order of megatron/mpu/initialize.py
"""Model and data parallel groups."""
from typing import Tuple, Optional
import warnings
import os
import torch
from apex.transformer.log_util import get_transformer_logger
from apex.transformer._ucc_util import HAS_UCC
_logger = get_transformer_logger(__name__)
# N.B. (mkozuki): Diff btwn Megatron-LM & apex parallel_state
# set(megatron_mpu_initialize_funcs) - set(apex.transformer.parallel_state) =
# {
# 'get_num_layers',
# }
# Intra-layer model parallel group that the current rank belongs to.
_TENSOR_MODEL_PARALLEL_GROUP = None
# Inter-layer model parallel group that the current rank belongs to.
_PIPELINE_MODEL_PARALLEL_GROUP = None
# Model parallel group (both intra- and pipeline) that the current rank belongs to.
_MODEL_PARALLEL_GROUP = None
# Embedding group.
_EMBEDDING_GROUP = None
# Position embedding group.
_POSITION_EMBEDDING_GROUP = None
# Relative position embedding group.
_ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP = None
_DECODER_RELATIVE_POSITION_EMBEDDING_GROUP = None
# Data parallel group that the current rank belongs to.
_DATA_PARALLEL_GROUP = None
# Data parallel AMAX reduction group that the current rank belongs to.
_AMAX_REDUCTION_GROUP = None
_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None
_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
_PIPELINE_MODEL_PARALLEL_SPLIT_RANK = None
# These values enable us to change the mpu sizes on the fly.
_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None
_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
_MPU_TENSOR_MODEL_PARALLEL_RANK = None
_MPU_PIPELINE_MODEL_PARALLEL_RANK = None
# A list of ranks that have a copy of the embedding.
_EMBEDDING_GLOBAL_RANKS = None
# A list of ranks that have a copy of the position embedding.
_POSITION_EMBEDDING_GLOBAL_RANKS = None
# A list of ranks that have a copy of the relative position embedding.
_ENCODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS = None
_DECODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS = None
# A list of global ranks for each pipeline group to ease calculation of the source
# rank when broadcasting from the first or last pipeline stage
_PIPELINE_GLOBAL_RANKS = None
def is_unitialized():
"""Useful for code segments that may be accessed with or without mpu initialization"""
return _DATA_PARALLEL_GROUP is None
def set_nccl_socket_envs():
if os.getenv("NCCL_SOCKET_IFNAME") is None:
raise RuntimeError("NCCL_SOCKET_IFNAME was not set")
os.environ["NCCL_NET"] = "Socket"
def set_nccl_ib_envs():
os.environ["NCCL_NET"] = "IB"
def init_nccl_net(group):
temp = torch.ones(1, device="cuda")
torch.distributed.all_reduce(temp, group=group)
torch.cuda.synchronize()
def new_nccl_socket_group(ranks):
set_nccl_socket_envs()
group = torch.distributed.new_group(ranks, backend="nccl")
init_nccl_net(group=group)
return group
def new_nccl_ib_group(ranks):
set_nccl_ib_envs()
group = torch.distributed.new_group(ranks, backend="nccl")
init_nccl_net(group=group)
return group
def new_process_group(ranks, backend):
"""
This function creates process groups.
In addition to simply creating the process groups, it initializes NCCL
for a hybrid IB/Socket network like the one in the following diagram:
____________
[GPU Node 0]---TCP---| |---TCP---[GPU Node 2]
| | | |
| | | |
IB | IP Network | IB
| | | |
| | | |
[GPU Node 1]---TCP---|____________|---TCP---[GPU Node 3]
If the environment variable NUM_GPUS_PER_IB_BLOCK is defined, it looks up the ranks
and determines whether they all belong to the same computational block, in which
GPU nodes are interconnected via an IB type of connection.
If all ranks are in the same block, the process group will use NCCL_NET=IB for
communication; otherwise it will use NCCL_NET=Socket.
If NCCL_NET=Socket is ever to be used, the user must set NCCL_SOCKET_IFNAME.
Additionally, it is recommended to set NCCL_SOCKET_NTHREADS and
NCCL_NSOCKS_PERTHREAD before running the job.
See: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/env.html
for more info
The core assumption for this functionality is that the ranks are evenly divided
into IB blocks and all these IB blocks are of the same size.
"""
if backend is None:
backend = "nccl"
compute_block_size = os.getenv("NUM_GPUS_PER_IB_BLOCK")
if backend == "nccl" and compute_block_size is not None:
compute_block_size = int(compute_block_size)
blocks = [rank // compute_block_size for rank in ranks]
use_ib = all(block == blocks[0] for block in blocks)
if use_ib:
return new_nccl_ib_group(ranks)
else:
return new_nccl_socket_group(ranks)
else:
return torch.distributed.new_group(ranks, backend=backend)
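# Worked example of the block logic above (values are illustrative): with
# NUM_GPUS_PER_IB_BLOCK=8, ranks [0..7] all map to block 0, so a group built from them
# is created with new_nccl_ib_group (NCCL_NET=IB); ranks [4, 12] map to blocks [0, 1],
# so that group falls back to new_nccl_socket_group (NCCL_NET=Socket), which requires
# NCCL_SOCKET_IFNAME to be set.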
def initialize_model_parallel(
tensor_model_parallel_size_: int = 1,
pipeline_model_parallel_size_: int = 1,
virtual_pipeline_model_parallel_size_: Optional[int] = None,
pipeline_model_parallel_split_rank_: Optional[int] = None,
use_fp8_: bool = False,
init_mpi_proc_group: bool = False,
*,
default_backend: Optional[str] = None,
p2p_backend: Optional[str] = None,
) -> None:
"""
Initialize model data parallel groups.
Arguments:
tensor_model_parallel_size: number of GPUs used to parallelize model tensor.
pipeline_model_parallel_size: number of GPUs used to parallelize model pipeline.
virtual_pipeline_model_parallel_size: number of virtual stages (interleaved pipeline).
pipeline_model_parallel_split_rank: for models with both encoder and decoder, rank in pipeline with split point.
use_fp8_: FP8 training that needs AMAX reduction across data-parallel ranks.
init_mpi_proc_group: Create a MPI process group, which is used for UCX-based communication APIs.
Keyword Arguments:
default_backend: Backend of process groups except for pipeline parallel ones.
If :obj:`None`, the backend specified in `torch.distributed.init_process_group` will be used.
p2p_backend: Backend of process groups for pipeline model parallel.
If :obj:`None`, the backend specified in `torch.distributed.init_process_group` will be used.
.. note::
`torch_ucc <https://github.com/facebookresearch/torch_ucc>`_ is
necessary for "ucc" backend.
Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
the model pipeline. The present function will
create 8 tensor model-parallel groups, 4 pipeline model-parallel groups
and 8 data-parallel groups as:
8 data_parallel groups:
[g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
8 tensor model-parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
4 pipeline model-parallel groups:
[g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
assert default_backend is None or default_backend in ("nccl", "ucc")
assert p2p_backend is None or p2p_backend in ("nccl", "ucc")
if "ucc" in (default_backend, p2p_backend):
if not HAS_UCC:
raise ImportError("UCC backend requires pytorch source build with UCC installed and enabled")
warnings.warn("`ucc` backend support is experimental", ExperimentalWarning)
if default_backend == "ucc":
warnings.warn("The UCC's functionality as `default_backend` is not well verified", ExperimentalWarning)
# Saving the NCCL_NET type for reusing it at the epilogue
default_nccl_net = os.getenv("NCCL_NET")
world_size: int = torch.distributed.get_world_size()
tensor_model_parallel_size: int = min(tensor_model_parallel_size_, world_size)
pipeline_model_parallel_size: int = min(pipeline_model_parallel_size_, world_size)
if world_size % (tensor_model_parallel_size * pipeline_model_parallel_size) != 0:
raise RuntimeError(
f"`world_size` ({world_size}) is not divisible by tensor_model_parallel_size ({tensor_model_parallel_size}) x pipeline_model_parallel_size ({pipeline_model_parallel_size})"
)
data_parallel_size: int = world_size // (
tensor_model_parallel_size * pipeline_model_parallel_size
)
if torch.distributed.get_rank() == 0:
_logger.info(
"> initializing tensor model parallel with size {}".format(
tensor_model_parallel_size
)
)
_logger.info(
"> initializing pipeline model parallel with size {}".format(
pipeline_model_parallel_size
)
)
_logger.info(
"> initializing data parallel with size {}".format(data_parallel_size)
)
num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size
num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size
num_data_parallel_groups: int = world_size // data_parallel_size
if virtual_pipeline_model_parallel_size_ is not None:
# n.b. (eqy) This check was inherited from Megatron-LM, need to revisit
# the root cause as we do see numerical mismatches with 2 stages and
# the interleaved schedule
assert pipeline_model_parallel_size_ > 2, (
"pipeline-model-parallel size should be greater than 2 with "
"interleaved schedule"
)
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = 0
_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = (
virtual_pipeline_model_parallel_size_
)
if pipeline_model_parallel_split_rank_ is not None:
global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
_PIPELINE_MODEL_PARALLEL_SPLIT_RANK = pipeline_model_parallel_split_rank_
rank = torch.distributed.get_rank()
# Build the data-parallel groups.
global _DATA_PARALLEL_GROUP
assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized"
all_data_parallel_group_ranks = []
for i in range(pipeline_model_parallel_size):
start_rank = i * num_pipeline_model_parallel_groups
end_rank = (i + 1) * num_pipeline_model_parallel_groups
for j in range(tensor_model_parallel_size):
ranks = range(start_rank + j, end_rank, tensor_model_parallel_size)
all_data_parallel_group_ranks.append(list(ranks))
group = new_process_group(ranks, backend=default_backend)
if rank in ranks:
_DATA_PARALLEL_GROUP = group
# Build the amax-reduction groups for fp8 precision conversion.
if use_fp8_:
global _AMAX_REDUCTION_GROUP
assert _AMAX_REDUCTION_GROUP is None, "amax reduction group is already initialized"
amax_group_size: int = tensor_model_parallel_size * data_parallel_size
num_amax_groups: int = world_size // amax_group_size
for i in range(num_amax_groups):
start_rank = i * amax_group_size
end_rank = (i + 1) * amax_group_size
ranks = range(start_rank, end_rank)
group = torch.distributed.new_group(ranks, backend=default_backend)
if rank in ranks:
_AMAX_REDUCTION_GROUP = group
# Build the model-parallel groups.
global _MODEL_PARALLEL_GROUP
assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized"
for i in range(data_parallel_size):
ranks = [
data_parallel_group_ranks[i]
for data_parallel_group_ranks in all_data_parallel_group_ranks
]
group = new_process_group(ranks, backend=default_backend)
if rank in ranks:
_MODEL_PARALLEL_GROUP = group
# Build the tensor model-parallel groups.
global _TENSOR_MODEL_PARALLEL_GROUP
assert (
_TENSOR_MODEL_PARALLEL_GROUP is None
), "tensor model parallel group is already initialized"
for i in range(num_tensor_model_parallel_groups):
ranks = list(
range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
)
group = new_process_group(ranks, backend=default_backend)
if rank in ranks:
_TENSOR_MODEL_PARALLEL_GROUP = group
# Build the pipeline model-parallel groups and embedding groups
# (first and last rank in each pipeline model-parallel group).
global _PIPELINE_MODEL_PARALLEL_GROUP
global _PIPELINE_GLOBAL_RANKS
assert (
_PIPELINE_MODEL_PARALLEL_GROUP is None
), "pipeline model parallel group is already initialized"
global _EMBEDDING_GROUP
global _EMBEDDING_GLOBAL_RANKS
assert _EMBEDDING_GROUP is None, "embedding group is already initialized"
global _POSITION_EMBEDDING_GROUP
global _POSITION_EMBEDDING_GLOBAL_RANKS
assert (
_POSITION_EMBEDDING_GROUP is None
), "position embedding group is already initialized"
global _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP
global _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP
global _ENCODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
global _DECODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
assert _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP is None or \
_DECODER_RELATIVE_POSITION_EMBEDDING_GROUP is None, \
'relative position embedding group is already initialized'
for i in range(num_pipeline_model_parallel_groups):
ranks = range(i, world_size, num_pipeline_model_parallel_groups)
group = new_process_group(ranks, backend=p2p_backend)
if rank in ranks:
_PIPELINE_MODEL_PARALLEL_GROUP = group
_PIPELINE_GLOBAL_RANKS = ranks
# Setup embedding group (to exchange gradients between
# first and last stages).
encoder_relative_position_embedding_ranks = None
decoder_relative_position_embedding_ranks = None
if len(ranks) > 1:
embedding_ranks = [ranks[0], ranks[-1]]
position_embedding_ranks = [ranks[0]]
encoder_relative_position_embedding_ranks = [ranks[0]]
decoder_relative_position_embedding_ranks = [ranks[0]]
if pipeline_model_parallel_split_rank_ is not None:
encoder_relative_position_embedding_ranks = \
ranks[:pipeline_model_parallel_split_rank_]
decoder_relative_position_embedding_ranks = \
ranks[pipeline_model_parallel_split_rank_:]
if ranks[pipeline_model_parallel_split_rank_] not in embedding_ranks:
embedding_ranks = [
ranks[0],
ranks[pipeline_model_parallel_split_rank_],
ranks[-1],
]
if (
ranks[pipeline_model_parallel_split_rank_]
not in position_embedding_ranks
):
position_embedding_ranks = [
ranks[0],
ranks[pipeline_model_parallel_split_rank_],
]
else:
embedding_ranks = ranks
position_embedding_ranks = ranks
encoder_relative_position_embedding_ranks = ranks
decoder_relative_position_embedding_ranks = ranks
group = new_process_group(embedding_ranks, backend=p2p_backend)
if rank in embedding_ranks:
_EMBEDDING_GROUP = group
if rank in ranks:
_EMBEDDING_GLOBAL_RANKS = embedding_ranks
group = new_process_group(position_embedding_ranks, backend=p2p_backend)
if rank in position_embedding_ranks:
_POSITION_EMBEDDING_GROUP = group
if rank in ranks:
_POSITION_EMBEDDING_GLOBAL_RANKS = position_embedding_ranks
if encoder_relative_position_embedding_ranks:
group = new_process_group(encoder_relative_position_embedding_ranks, backend=p2p_backend)
if rank in encoder_relative_position_embedding_ranks:
_ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP = group
if rank in ranks:
_ENCODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS = \
encoder_relative_position_embedding_ranks
if decoder_relative_position_embedding_ranks:
group = new_process_group(decoder_relative_position_embedding_ranks, backend=p2p_backend)
if rank in decoder_relative_position_embedding_ranks:
_DECODER_RELATIVE_POSITION_EMBEDDING_GROUP = group
if rank in ranks:
_DECODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS = \
decoder_relative_position_embedding_ranks
if init_mpi_proc_group:
torch.distributed.new_group(backend='mpi')
if default_nccl_net == "Socket":
set_nccl_socket_envs()
elif default_nccl_net == "IB":
set_nccl_ib_envs()
elif default_nccl_net is None:
os.unsetenv("NCCL_NET")
else:
os.environ["NCCL_NET"] = default_nccl_net
def get_rank_info() -> Tuple[int, int, int, int]:
"""Returns a tuple of (data, tensor, pipeline, virtual pipeline)-parallel ranks for logging."""
if model_parallel_is_initialized():
return (
get_data_parallel_rank(),
get_tensor_model_parallel_rank(),
get_pipeline_model_parallel_rank(),
get_virtual_pipeline_model_parallel_rank(),
)
return (0, 0, 0, 0)
def model_parallel_is_initialized():
"""Check if model and data parallel groups are initialized."""
if (
_TENSOR_MODEL_PARALLEL_GROUP is None
or _PIPELINE_MODEL_PARALLEL_GROUP is None
or _DATA_PARALLEL_GROUP is None
):
return False
return True
def get_model_parallel_group():
"""Get the model parallel group the caller rank belongs to."""
assert _MODEL_PARALLEL_GROUP is not None, "model parallel group is not initialized"
return _MODEL_PARALLEL_GROUP
def get_tensor_model_parallel_group():
"""Get the tensor model parallel group the caller rank belongs to."""
assert (
_TENSOR_MODEL_PARALLEL_GROUP is not None
), "intra_layer_model parallel group is not initialized"
return _TENSOR_MODEL_PARALLEL_GROUP
def get_pipeline_model_parallel_group():
"""Get the pipeline model parallel group the caller rank belongs to."""
assert (
_PIPELINE_MODEL_PARALLEL_GROUP is not None
), "pipeline_model parallel group is not initialized"
return _PIPELINE_MODEL_PARALLEL_GROUP
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
assert _DATA_PARALLEL_GROUP is not None, "data parallel group is not initialized"
return _DATA_PARALLEL_GROUP
def get_amax_reduction_group():
"""Get the amax reduction group the caller rank belongs to."""
assert _AMAX_REDUCTION_GROUP is not None, \
"AMAX reduction group is not initialized"
return _AMAX_REDUCTION_GROUP
def get_embedding_group():
"""Get the embedding group the caller rank belongs to."""
assert _EMBEDDING_GROUP is not None, "embedding group is not initialized"
return _EMBEDDING_GROUP
def get_position_embedding_group():
"""Get the position embedding group the caller rank belongs to."""
assert (
_POSITION_EMBEDDING_GROUP is not None
), "position embedding group is not initialized"
return _POSITION_EMBEDDING_GROUP
def get_encoder_relative_position_embedding_group():
"""Get the encoder relative position embedding group the caller rank belongs to."""
assert _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP is not None, \
'encoder relative position embedding group is not initialized'
return _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP
def get_decoder_relative_position_embedding_group():
"""Get the decoder relative position embedding group the caller rank belongs to."""
assert _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP is not None, \
'decoder relative position embedding group is not initialized'
return _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP
def is_rank_in_embedding_group(ignore_virtual=False):
"""Return true if current rank is in embedding group, False otherwise."""
rank = torch.distributed.get_rank()
global _EMBEDDING_GLOBAL_RANKS
if ignore_virtual:
return rank in _EMBEDDING_GLOBAL_RANKS
if rank in _EMBEDDING_GLOBAL_RANKS:
if rank == _EMBEDDING_GLOBAL_RANKS[0]:
return is_pipeline_first_stage(ignore_virtual=False)
elif rank == _EMBEDDING_GLOBAL_RANKS[-1]:
return is_pipeline_last_stage(ignore_virtual=False)
else:
return True
return False
def is_rank_in_position_embedding_group():
"""Return whether the current rank is in position embedding group."""
rank = torch.distributed.get_rank()
global _POSITION_EMBEDDING_GLOBAL_RANKS
return rank in _POSITION_EMBEDDING_GLOBAL_RANKS
def is_rank_in_encoder_relative_position_embedding_group():
"""Return true if current rank is in encoder relative position embedding group, False otherwise."""
rank = torch.distributed.get_rank()
global _ENCODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
return rank in _ENCODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
def is_rank_in_decoder_relative_position_embedding_group():
"""Return true if current rank is in decoder relative position embedding group, False otherwise."""
rank = torch.distributed.get_rank()
global _DECODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
return rank in _DECODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
def is_pipeline_stage_before_split(rank=None):
"""Return True if pipeline stage executes encoder block for a model
with both encoder and decoder."""
if get_pipeline_model_parallel_world_size() == 1:
return True
if rank is None:
rank = get_pipeline_model_parallel_rank()
global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
if _PIPELINE_MODEL_PARALLEL_SPLIT_RANK is None:
return True
if rank < _PIPELINE_MODEL_PARALLEL_SPLIT_RANK:
return True
return False
def is_pipeline_stage_after_split(rank=None):
"""Return True if pipeline stage executes decoder block for a model
with both encoder and decoder."""
if get_pipeline_model_parallel_world_size() == 1:
return True
if rank is None:
rank = get_pipeline_model_parallel_rank()
global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
if _PIPELINE_MODEL_PARALLEL_SPLIT_RANK is None:
return True
if rank >= _PIPELINE_MODEL_PARALLEL_SPLIT_RANK:
return True
return False
def is_pipeline_stage_at_split():
"""Return true if pipeline stage executes decoder block and next
stage executes encoder block for a model with both encoder and
decoder."""
rank = get_pipeline_model_parallel_rank()
return is_pipeline_stage_before_split(rank) and is_pipeline_stage_after_split(
rank + 1
)
def set_tensor_model_parallel_world_size(world_size):
"""Set the tensor model parallel size"""
global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = world_size
def set_pipeline_model_parallel_world_size(world_size):
"""Set the pipeline model parallel size"""
global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size
def get_tensor_model_parallel_world_size():
"""Return world size for the tensor model parallel group."""
global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
if _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE is not None:
return _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
def get_pipeline_model_parallel_world_size():
"""Return world size for the pipeline model parallel group."""
global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
if _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE is not None:
return _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
return torch.distributed.get_world_size(group=get_pipeline_model_parallel_group())
def set_tensor_model_parallel_rank(rank):
"""Set tensor model parallel rank."""
global _MPU_TENSOR_MODEL_PARALLEL_RANK
_MPU_TENSOR_MODEL_PARALLEL_RANK = rank
def set_pipeline_model_parallel_rank(rank):
"""Set pipeline model parallel rank."""
global _MPU_PIPELINE_MODEL_PARALLEL_RANK
_MPU_PIPELINE_MODEL_PARALLEL_RANK = rank
def get_tensor_model_parallel_rank():
"""Return my rank for the tensor model parallel group."""
global _MPU_TENSOR_MODEL_PARALLEL_RANK
if _MPU_TENSOR_MODEL_PARALLEL_RANK is not None:
return _MPU_TENSOR_MODEL_PARALLEL_RANK
return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
def get_pipeline_model_parallel_rank():
"""Return my rank for the pipeline model parallel group."""
global _MPU_PIPELINE_MODEL_PARALLEL_RANK
if _MPU_PIPELINE_MODEL_PARALLEL_RANK is not None:
return _MPU_PIPELINE_MODEL_PARALLEL_RANK
return torch.distributed.get_rank(group=get_pipeline_model_parallel_group())
# TODO (mkozuki): Add [`get_num_layers`](https://github.com/NVIDIA/Megatron-LM/blob/e156d2fea7fc5c98e645f7742eb86b643956d840/megatron/mpu/initialize.py#L321) here, maybe?
def get_pipeline_model_parallel_split_rank():
"""Return my rank for the pipeline model parallel split rank."""
global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
return _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
def set_pipeline_model_parallel_split_rank(pipeline_model_parallel_split_rank: int):
"""Set my rank for the pipeline model parallel split rank."""
global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
_PIPELINE_MODEL_PARALLEL_SPLIT_RANK = pipeline_model_parallel_split_rank
def is_pipeline_first_stage(ignore_virtual=False):
"""Return True if in the first pipeline model-parallel stage, False otherwise."""
if not ignore_virtual:
if (
get_virtual_pipeline_model_parallel_world_size() is not None
and get_virtual_pipeline_model_parallel_rank() != 0
):
return False
return get_pipeline_model_parallel_rank() == 0
def is_pipeline_last_stage(ignore_virtual=False):
"""Return True if in the last pipeline model-parallel stage, False otherwise."""
if not ignore_virtual:
virtual_pipeline_model_parallel_world_size = (
get_virtual_pipeline_model_parallel_world_size()
)
if virtual_pipeline_model_parallel_world_size is not None and get_virtual_pipeline_model_parallel_rank() != (
virtual_pipeline_model_parallel_world_size - 1
):
return False
return get_pipeline_model_parallel_rank() == (
get_pipeline_model_parallel_world_size() - 1
)
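# Example (illustrative): with a 4-stage pipeline and an interleaved schedule of
# virtual size 2, is_pipeline_last_stage() only returns True for pipeline rank 3 when
# its virtual rank is 1; is_pipeline_last_stage(ignore_virtual=True) drops the
# virtual-rank condition and checks the pipeline rank alone.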
def get_virtual_pipeline_model_parallel_rank():
"""Return the virtual pipeline-parallel rank."""
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
return _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
def set_virtual_pipeline_model_parallel_rank(rank):
"""Set the virtual pipeline-parallel rank."""
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = rank
def get_virtual_pipeline_model_parallel_world_size():
"""Return the virtual pipeline-parallel world size."""
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
return _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
def set_virtual_pipeline_model_parallel_world_size(size):
"""Return the virtual pipeline-parallel world size."""
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = size
def get_tensor_model_parallel_src_rank():
"""Calculate the global rank corresponding to the first local rank
in the tensor model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_tensor_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
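# Example (illustrative): with a tensor model parallel world size of 2, global ranks
# 0 and 1 both map to source rank 0, ranks 2 and 3 map to 2, and so on, because the
# tensor model parallel groups are built from consecutive ranks.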
def get_data_parallel_src_rank():
"""Calculate the global rank corresponding to the first local rank in the data parallel group."""
global_rank = torch.distributed.get_rank()
data_parallel_size: int = get_data_parallel_world_size()
num_data_parallel_groups = torch.distributed.get_world_size() // data_parallel_size
return global_rank % num_data_parallel_groups
def get_pipeline_model_parallel_first_rank():
assert (
_PIPELINE_GLOBAL_RANKS is not None
), "Pipeline parallel group is not initialized"
return _PIPELINE_GLOBAL_RANKS[0]
def get_pipeline_model_parallel_last_rank():
assert (
_PIPELINE_GLOBAL_RANKS is not None
), "Pipeline parallel group is not initialized"
last_rank_local = get_pipeline_model_parallel_world_size() - 1
return _PIPELINE_GLOBAL_RANKS[last_rank_local]
def get_pipeline_model_parallel_next_rank():
assert (
_PIPELINE_GLOBAL_RANKS is not None
), "Pipeline parallel group is not initialized"
rank_in_pipeline = get_pipeline_model_parallel_rank()
world_size = get_pipeline_model_parallel_world_size()
return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size]
def get_pipeline_model_parallel_prev_rank():
assert (
_PIPELINE_GLOBAL_RANKS is not None
), "Pipeline parallel group is not initialized"
rank_in_pipeline = get_pipeline_model_parallel_rank()
world_size = get_pipeline_model_parallel_world_size()
return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size]
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
return torch.distributed.get_world_size(group=get_data_parallel_group())
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return torch.distributed.get_rank(group=get_data_parallel_group())
# note (mkozuki): `destroy_model_parallel` voids more global variables than Megatron-LM.
# Otherwise the pipeline parallel forward_backward function tests hang, possibly because
# the clean-up done by the original is not enough.
def destroy_model_parallel():
"""Set the groups to none."""
global _MODEL_PARALLEL_GROUP
_MODEL_PARALLEL_GROUP = None
global _TENSOR_MODEL_PARALLEL_GROUP
_TENSOR_MODEL_PARALLEL_GROUP = None
global _PIPELINE_MODEL_PARALLEL_GROUP
_PIPELINE_MODEL_PARALLEL_GROUP = None
global _DATA_PARALLEL_GROUP
_DATA_PARALLEL_GROUP = None
global _AMAX_REDUCTION_GROUP
_AMAX_REDUCTION_GROUP = None
global _EMBEDDING_GROUP
_EMBEDDING_GROUP = None
global _POSITION_EMBEDDING_GROUP
_POSITION_EMBEDDING_GROUP = None
global _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP
_ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP = None
global _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP
_DECODER_RELATIVE_POSITION_EMBEDDING_GROUP = None
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None
global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None
global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
global _MPU_TENSOR_MODEL_PARALLEL_RANK
_MPU_TENSOR_MODEL_PARALLEL_RANK = None
global _MPU_PIPELINE_MODEL_PARALLEL_RANK
_MPU_PIPELINE_MODEL_PARALLEL_RANK = None
# Used to warn when the `ucc` backend is specified.
class ExperimentalWarning(Warning): pass
| GeneSplice-main | GeneSplice/apex/apex/transformer/parallel_state.py |
import logging
import os
def get_transformer_logger(name: str) -> logging.Logger:
name_wo_ext = os.path.splitext(name)[0]
return logging.getLogger(name_wo_ext)
def set_logging_level(verbosity) -> None:
"""Change logging severity.
Args:
verbosity
"""
from apex import _library_root_logger
_library_root_logger.setLevel(verbosity)
| GeneSplice-main | GeneSplice/apex/apex/transformer/log_util.py |
from apex.transformer import amp
from apex.transformer import functional
from apex.transformer import parallel_state
from apex.transformer import pipeline_parallel
from apex.transformer import tensor_parallel
from apex.transformer import utils
from apex.transformer.enums import LayerType
from apex.transformer.enums import AttnType
from apex.transformer.enums import AttnMaskType
__all__ = [
"amp",
"functional",
"parallel_state",
"pipeline_parallel",
"tensor_parallel",
"utils",
# enums.py
"LayerType",
"AttnType",
"AttnMaskType",
]
| GeneSplice-main | GeneSplice/apex/apex/transformer/__init__.py |
from torch import distributed as dist
HAS_UCC = hasattr(dist, "is_ucc_available") and dist.is_ucc_available()
if not HAS_UCC:
try:
import torch_ucc
HAS_UCC = True
except ImportError:
HAS_UCC = False
| GeneSplice-main | GeneSplice/apex/apex/transformer/_ucc_util.py |
"""Utility functions used by both `pipeline_parallel` and `tensor_parallel`"""
import torch
from apex.transformer import parallel_state
# `all_gather_into_tensor` is the new name for `_all_gather_base`.
# It requires the most recent version of PyTorch.
# The following lines are for backward compatibility with
# older PyTorch versions.
if "all_gather_into_tensor" not in dir(torch.distributed):
torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
def ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, "{} is not divisible by {}".format(
numerator, denominator
)
def divide(numerator, denominator):
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
def split_tensor_into_1d_equal_chunks(tensor):
"""Break a tensor into equal 1D chunks."""
data = tensor.view(-1)
partition_size = (
torch.numel(data) // parallel_state.get_tensor_model_parallel_world_size()
)
start_index = partition_size * parallel_state.get_tensor_model_parallel_rank()
end_index = start_index + partition_size
return data[start_index:end_index]
def gather_split_1d_tensor(tensor):
"""Opposite of above function, gather values from model parallel ranks."""
world_size = parallel_state.get_tensor_model_parallel_world_size()
numel = torch.numel(tensor)
numel_gathered = world_size * numel
gathered = torch.empty(
numel_gathered,
dtype=tensor.dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
torch.distributed.all_gather_into_tensor(
gathered,
tensor,
group=parallel_state.get_tensor_model_parallel_group()
)
return gathered
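# Illustrative round trip (a sketch): for a tensor with N elements on a tensor model
# parallel group of size W (N divisible by W), split_tensor_into_1d_equal_chunks keeps
# the N // W elements owned by this rank, and gather_split_1d_tensor applied to that
# chunk all-gathers the full N elements back as a flat 1D tensor.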
| GeneSplice-main | GeneSplice/apex/apex/transformer/utils.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron number of micro-batches calculators."""
from abc import ABC
from abc import abstractmethod
from typing import Optional, List
from apex.transformer.log_util import get_transformer_logger
_logger = get_transformer_logger(__name__)
def build_num_microbatches_calculator(
rank: int,
rampup_batch_size: Optional[List[int]],
global_batch_size: int,
micro_batch_size: int,
data_parallel_size: int,
):
# Constant num micro-batches.
if rampup_batch_size is None:
num_microbatches_calculator = ConstantNumMicroBatches(
global_batch_size, micro_batch_size, data_parallel_size
)
if rank == 0:
_logger.info(
"setting number of micro-batches to constant {}".format(
num_microbatches_calculator.get()
)
)
else:
assert len(rampup_batch_size) == 3, (
"expected the following "
"format: --rampup-batch-size <start batch size> "
"<batch size incerement> <ramp-up samples>"
)
start_batch_size = int(rampup_batch_size[0])
batch_size_increment = int(rampup_batch_size[1])
ramup_samples = int(rampup_batch_size[2])
if rank == 0:
_logger.info(
"will use batch size rampup starting from global batch "
"size {} to global batch size {} with batch size increments "
"{} over {} samples.".format(
start_batch_size,
global_batch_size,
batch_size_increment,
ramup_samples,
)
)
num_microbatches_calculator = RampupBatchsizeNumMicroBatches(
start_batch_size,
batch_size_increment,
ramup_samples,
global_batch_size,
micro_batch_size,
data_parallel_size,
)
return num_microbatches_calculator
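# Worked example (illustrative numbers): with global_batch_size=16, micro_batch_size=2
# and data_parallel_size=2, rampup_batch_size=None selects ConstantNumMicroBatches and
# its get() returns 16 // (2 * 2) == 4 micro-batches.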
class NumMicroBatchesCalculator(ABC):
def __init__(self):
self.num_micro_batches = None
self.current_global_batch_size = None
def get(self):
return self.num_micro_batches
def get_current_global_batch_size(self):
return self.current_global_batch_size
@abstractmethod
def update(self, consumed_samples, consistency_check):
pass
class ConstantNumMicroBatches(NumMicroBatchesCalculator):
def __init__(self, global_batch_size, micro_batch_size, data_parallel_size):
micro_batch_times_data_parallel = micro_batch_size * data_parallel_size
assert global_batch_size % micro_batch_times_data_parallel == 0, (
"global batch size ({}) is not divisible by micro batch size ({})"
" times data parallel size ({})".format(
global_batch_size, micro_batch_size, data_parallel_size
)
)
self.num_micro_batches = global_batch_size // micro_batch_times_data_parallel
assert self.num_micro_batches >= 1
self.current_global_batch_size = global_batch_size
self.micro_batch_size = micro_batch_size
def update(self, consumed_samples, consistency_check):
pass
class RampupBatchsizeNumMicroBatches(NumMicroBatchesCalculator):
def __init__(
self,
start_batch_size,
batch_size_increment,
ramup_samples,
global_batch_size,
micro_batch_size,
data_parallel_size,
):
"""Batch size ramp up.
Over
steps = (global-batch-size - start-batch-size) / batch_size_increment
increment batch size from start-batch-size to global-batch-size using
rampup-samples / steps
samples.
Arguments:
start_batch_size: global batch size to start with
batch_size_increment: global batch size increments
ramup_samples: number of samples used to ramp up the global
batch size from `start_batch_size` to `global_batch_size`
global_batch_size: global batch size post rampup
micro_batch_size: micro batch size
data_parallel_size: data parallel size.
"""
self.micro_batch_size = micro_batch_size
self.data_parallel_size = data_parallel_size
self.micro_batch_times_data_parallel_size = (
self.micro_batch_size * self.data_parallel_size
)
assert self.micro_batch_times_data_parallel_size > 0
assert start_batch_size > 0
self.start_batch_size = start_batch_size
assert global_batch_size > 0
self.global_batch_size = global_batch_size
diff_batch_size = self.global_batch_size - self.start_batch_size
assert diff_batch_size >= 0
assert batch_size_increment > 0
self.batch_size_increment = batch_size_increment
assert diff_batch_size % batch_size_increment == 0, (
"expected "
"global batch size interval ({}) to be divisible by global batch "
"size increment ({})".format(diff_batch_size, batch_size_increment)
)
num_increments = diff_batch_size // self.batch_size_increment
self.ramup_samples = ramup_samples
assert self.ramup_samples >= 0
self.rampup_samples_per_increment = self.ramup_samples / num_increments
# Initialize number of microbatches.
self.update(0, False)
def update(self, consumed_samples, consistency_check):
if consumed_samples > self.ramup_samples:
self.current_global_batch_size = self.global_batch_size
else:
steps = int(consumed_samples / self.rampup_samples_per_increment)
self.current_global_batch_size = (
self.start_batch_size + steps * self.batch_size_increment
)
assert self.current_global_batch_size <= self.global_batch_size
if consistency_check:
assert (
self.current_global_batch_size
% self.micro_batch_times_data_parallel_size
== 0
), (
"current global "
"batch size ({}) is not divisible by micro-batch-size ({}) times"
"data parallel size ({})".format(
self.current_global_batch_size,
self.micro_batch_size,
self.data_parallel_size,
)
)
self.num_micro_batches = (
self.current_global_batch_size // self.micro_batch_times_data_parallel_size
)
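# A minimal, single-process usage sketch of the calculators defined above; purely
# illustrative (made-up batch sizes) and it assumes `apex` is importable so that the
# logger import at the top of this file resolves.
if __name__ == "__main__":
    calculator = build_num_microbatches_calculator(
        rank=0,
        rampup_batch_size=[32, 32, 1024],  # start at 32, grow by 32 over 1024 samples
        global_batch_size=128,
        micro_batch_size=4,
        data_parallel_size=2,
    )
    for consumed_samples in (0, 400, 800, 2000):
        calculator.update(consumed_samples, consistency_check=True)
        print(
            consumed_samples,
            calculator.get_current_global_batch_size(),
            calculator.get(),
        )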
| GeneSplice-main | GeneSplice/apex/apex/transformer/microbatches.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.transformer.tensor_parallel.utils import VocabUtility
class _VocabParallelCrossEntropy(torch.autograd.Function):
@staticmethod
def forward(ctx, vocab_parallel_logits, target, label_smoothing=0.0):
# Maximum value along vocab dimension across all GPUs.
logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
torch.distributed.all_reduce(
logits_max, op=torch.distributed.ReduceOp.MAX, group=get_tensor_model_parallel_group()
)
# Subtract the maximum value.
vocab_parallel_logits = vocab_parallel_logits - logits_max.unsqueeze(dim=-1)
        # Get the partition's vocab indices
get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
partition_vocab_size = vocab_parallel_logits.size()[-1]
rank = get_tensor_model_parallel_rank()
world_size = get_tensor_model_parallel_world_size()
vocab_start_index, vocab_end_index = get_vocab_range(partition_vocab_size, rank, world_size)
# Create a mask of valid vocab ids (1 means it needs to be masked).
target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
masked_target = target.clone() - vocab_start_index
masked_target[target_mask] = 0
# Get predicted-logits = logits[target].
        # For simplicity, we convert logits to a 2-D tensor with size
# [*, partition-vocab-size] and target to a 1-D tensor of size [*].
logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
masked_target_1d = masked_target.view(-1)
arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device)
predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
predicted_logits_1d = predicted_logits_1d.clone().contiguous()
predicted_logits = predicted_logits_1d.view_as(target)
predicted_logits[target_mask] = 0.0
# All reduce is needed to get the chunks from other GPUs.
torch.distributed.all_reduce(
predicted_logits, op=torch.distributed.ReduceOp.SUM, group=get_tensor_model_parallel_group()
)
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_logits = vocab_parallel_logits
torch.exp(vocab_parallel_logits, out=exp_logits)
sum_exp_logits = exp_logits.sum(dim=-1)
torch.distributed.all_reduce(
sum_exp_logits, op=torch.distributed.ReduceOp.SUM, group=get_tensor_model_parallel_group()
)
# Loss = log(sum(exp(logits))) - predicted-logit.
loss = torch.log(sum_exp_logits) - predicted_logits
# Store softmax, target-mask and masked-target for backward pass.
exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
vocab_size = exp_logits.size(-1)
if label_smoothing > 0:
"""
We'd like to assign 1 / (K - 1) probability mass to every index that is not the ground truth.
= (1 - alpha) * y_gt + alpha * mean(y_{i for i != gt})
= (1 - alpha) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i
= ((K - 1) * (1 - alpha) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i
= (K * (1 - alpha) - 1) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i} y_i
= (1 - (alpha * K) / (K - 1)) * y_gt + ( (alpha * K) / (K - 1) ) * \sum_{i} y_i / K
From: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/losses/smoothed_cross_entropy.py
"""
assert 1.0 > label_smoothing > 0.0
smoothing = label_smoothing * vocab_size / (vocab_size - 1)
# Exp logits at this point are normalized probabilities. So we can just take the log to get log-probs.
log_probs = torch.log(exp_logits)
mean_log_probs = log_probs.mean(dim=-1)
loss = (1.0 - smoothing) * loss - smoothing * mean_log_probs
ctx.label_smoothing, ctx.vocab_size = label_smoothing, vocab_size
ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
return loss
@staticmethod
def backward(ctx, grad_output):
        # Retrieve tensors from the forward path.
softmax, target_mask, masked_target_1d = ctx.saved_tensors
label_smoothing, vocab_size = ctx.label_smoothing, ctx.vocab_size
        # All the inputs have softmax as their gradient.
grad_input = softmax
# For simplicity, work with the 2D gradient.
partition_vocab_size = softmax.size()[-1]
grad_2d = grad_input.view(-1, partition_vocab_size)
# Add the gradient from matching classes.
arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
softmax_update = 1.0 - target_mask.view(-1).float()
if label_smoothing > 0:
smoothing = label_smoothing * vocab_size / (vocab_size - 1)
grad_2d[arange_1d, masked_target_1d] -= (1.0 - smoothing) * softmax_update
average_grad = 1 / vocab_size
grad_2d[arange_1d, :] -= smoothing * average_grad
else:
grad_2d[arange_1d, masked_target_1d] -= softmax_update
# Finally elementwise multiplication with the output gradients.
grad_input.mul_(grad_output.unsqueeze(dim=-1))
return grad_input, None, None
def vocab_parallel_cross_entropy(vocab_parallel_logits, target, label_smoothing=0.0):
"""Helper function for the cross entropy."""
return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target, label_smoothing)
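# Single-device sketch of the math implemented above, without tensor parallelism or
# label smoothing: loss = log(sum(exp(logits))) - logit[target]. Illustrative only;
# it assumes the apex imports at the top of this file resolve.
if __name__ == "__main__":
    import torch.nn.functional as F

    logits = torch.randn(8, 50)          # [tokens, vocab]
    target = torch.randint(0, 50, (8,))  # [tokens]
    shifted = logits - logits.max(dim=-1, keepdim=True)[0]  # same max-subtraction as above
    predicted = shifted[torch.arange(8), target]
    loss = torch.log(torch.exp(shifted).sum(dim=-1)) - predicted
    reference = F.cross_entropy(logits, target, reduction="none")
    assert torch.allclose(loss, reference, atol=1e-5)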
| GeneSplice-main | GeneSplice/apex/apex/transformer/tensor_parallel/cross_entropy.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(mkozuki): Remove this file as Megatron-LM seems to have done so.
import torch
# A dictionary of all the memory buffers allocated.
_MEM_BUFFS = dict()
def allocate_mem_buff(name, numel, dtype, track_usage):
"""Allocate a memory buffer."""
assert name not in _MEM_BUFFS, "memory buffer {} already allocated.".format(name)
_MEM_BUFFS[name] = MemoryBuffer(name, numel, dtype, track_usage)
return _MEM_BUFFS[name]
def get_mem_buff(name):
"""Get the memory buffer."""
return _MEM_BUFFS[name]
class MemoryBuffer:
"""Contiguous memory buffer.
Allocate a contiguous memory of type `dtype` and size `numel`. It is
used to reduce memory fragmentation.
    Usage: After the allocation, the `_start` index is set to the first
index of the memory. A memory chunk starting from `_start` index
can be `allocated` for an input tensor, with the elements of the
    tensor being copied. The buffer can be reused by resetting the
`_start` index.
"""
def __init__(self, name, numel, dtype, track_usage):
if torch.distributed.get_rank() == 0:
element_size = torch.tensor([], dtype=dtype).element_size()
print(
"> building the {} memory buffer with {} num elements "
"and {} dtype ({:.1f} MB)...".format(
name, numel, dtype, numel * element_size / 1024 / 1024
),
flush=True,
)
self.name = name
self.numel = numel
self.dtype = dtype
self.data = torch.empty(
self.numel,
dtype=self.dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
# Index tracking the start of the free memory.
self._start = 0
# Values used for tracking usage.
self.track_usage = track_usage
if self.track_usage:
self.in_use_value = 0.0
self.total_value = 0.0
def reset(self):
"""Reset the buffer start index to the beginning of the buffer."""
self._start = 0
def is_in_use(self):
"""Whether the current buffer hold on to any memory."""
return self._start > 0
def numel_in_use(self):
"""Return number of elements in use."""
return self._start
def add(self, tensor):
"""Allocate a chunk of memory from the buffer to tensor and copy
the values."""
assert (
tensor.dtype == self.dtype
), "Input tensor type {} different from buffer type {}".format(
tensor.dtype, self.dtype
)
# Number of elements of the input tensor.
tensor_numel = torch.numel(tensor)
new_start = self._start + tensor_numel
assert (
new_start <= self.numel
), "Not enough memory left in the buffer ({} > {})".format(
tensor_numel, self.numel - self._start
)
# New tensor is a view into the memory.
new_tensor = self.data[self._start : new_start]
self._start = new_start
new_tensor = new_tensor.view(tensor.shape)
new_tensor.copy_(tensor)
# Return a pointer to the new tensor.
return new_tensor
def get_data(self):
"""Return the data currently in use."""
if self.track_usage:
self.in_use_value += float(self._start)
self.total_value += float(self.numel)
return self.data[: self._start]
def print_average_usage(self):
"""Print memory usage average over time. We would like this value
to be as high as possible."""
assert self.track_usage, "You need to enable track usage."
if torch.distributed.get_rank() == 0:
print(
" > usage of {} memory buffer: {:.2f} %".format(
self.name, self.in_use_value * 100.0 / self.total_value
),
flush=True,
)
class RingMemBuffer:
"""A ring of memory buffers."""
def __init__(self, name, num_buffers, numel, dtype, track_usage):
self.num_buffers = num_buffers
self.buffers = [
allocate_mem_buff(name + " {}".format(i), numel, dtype, track_usage)
for i in range(num_buffers)
]
self._index = -1
def get_next_buffer(self):
self._index += 1
self._index = self._index % self.num_buffers
buff = self.buffers[self._index]
assert not buff.is_in_use(), "buffer is already in use."
return buff
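# Minimal single-process sketch of the buffer API above. It assumes a CUDA device is
# available and initializes a world-size-1 "gloo" process group purely so that
# `torch.distributed.get_rank()` works; the address/port below are arbitrary.
if __name__ == "__main__":
    torch.distributed.init_process_group(
        backend="gloo", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1
    )
    buf = allocate_mem_buff("demo", numel=4096, dtype=torch.float32, track_usage=True)
    chunk = buf.add(torch.randn(8, 128, device="cuda"))  # copies the tensor into the buffer
    assert chunk.shape == (8, 128) and buf.numel_in_use() == 8 * 128
    _ = buf.get_data()          # updates the usage statistics (track_usage=True)
    buf.print_average_usage()
    buf.reset()                 # makes the whole buffer reusable again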
| GeneSplice-main | GeneSplice/apex/apex/transformer/tensor_parallel/memory.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model parallel utility interface."""
from apex.transformer.tensor_parallel.cross_entropy import vocab_parallel_cross_entropy
from apex.transformer.tensor_parallel.data import broadcast_data
from apex.transformer.tensor_parallel.layers import (
ColumnParallelLinear,
RowParallelLinear,
VocabParallelEmbedding,
set_tensor_model_parallel_attributes,
set_defaults_if_not_set_tensor_model_parallel_attributes,
copy_tensor_model_parallel_attributes,
)
from apex.transformer.tensor_parallel.mappings import (
copy_to_tensor_model_parallel_region,
gather_from_tensor_model_parallel_region,
reduce_from_tensor_model_parallel_region,
scatter_to_tensor_model_parallel_region,
scatter_to_sequence_parallel_region,
)
from .random import (
checkpoint,
get_cuda_rng_tracker,
init_checkpointed_activations_memory_buffer,
model_parallel_cuda_manual_seed,
reset_checkpointed_activations_memory_buffer,
)
from apex.transformer.tensor_parallel.utils import split_tensor_along_last_dim
__all__ = [
# cross_entropy.py
"vocab_parallel_cross_entropy",
# data.py
"broadcast_data",
# layers.py
"ColumnParallelLinear",
"RowParallelLinear",
"VocabParallelEmbedding",
"set_tensor_model_parallel_attributes",
"set_defaults_if_not_set_tensor_model_parallel_attributes",
"copy_tensor_model_parallel_attributes",
# mappings.py
"copy_to_tensor_model_parallel_region",
"gather_from_tensor_model_parallel_region",
"reduce_from_tensor_model_parallel_region",
"scatter_to_tensor_model_parallel_region",
"scatter_to_sequence_parallel_region",
# random.py
"checkpoint",
"get_cuda_rng_tracker",
"init_checkpointed_activations_memory_buffer",
"model_parallel_cuda_manual_seed",
"reset_checkpointed_activations_memory_buffer",
# utils.py
"split_tensor_along_last_dim",
]
| GeneSplice-main | GeneSplice/apex/apex/transformer/tensor_parallel/__init__.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(mkozuki): This file is based on megatron-lm/mpu/random.py with some differences:
# - Not using "viewless" tensor:
# - _kernel_make_viewless_tensor
# - MakeViewlessTensor
# - make_viewless_tensor
# - assert_viewless_tensor
# - safely_set_viewless_tensor_data
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import contextlib
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from torch.utils.checkpoint import detach_variable
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.tensor_parallel.memory import allocate_mem_buff
from apex.transformer.utils import split_tensor_into_1d_equal_chunks
from apex.transformer.utils import gather_split_1d_tensor
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = "model-parallel-rng"
# TODO(mkozuki): Remove `_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER` as megatron-lm doesn't seem to use.
# Whether apply model parallelism to checkpointed hidden states.
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = None
# TODO(mkozuki): Remove `init_checkpointed_activations_memory_buffer` as megatron-lm doesn't seem to use.
def init_checkpointed_activations_memory_buffer(
micro_batch_size,
max_position_embeddings,
hidden_size,
num_layers,
tensor_model_parallel_size,
checkpoint_num_layers,
fp16,
):
"""Initializ the memory buffer for the checkpointed activations."""
per_layer = (
micro_batch_size
* max_position_embeddings
* hidden_size
// tensor_model_parallel_size
)
assert (
num_layers % checkpoint_num_layers == 0
), "number of layers is not divisible by checkpoint-num-layers"
num_checkpointer_layers = num_layers // checkpoint_num_layers
numel = per_layer * num_checkpointer_layers
dtype = torch.half
if not fp16:
dtype = torch.float
global _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER
assert (
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is None
), "checkpointed activations memory buffer is already allocated."
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = allocate_mem_buff(
"checkpointed activations", numel, dtype, track_usage=False
)
# TODO(mkozuki): Remove `reset_checkpointed_activations_memory_buffer` as megatron-lm doesn't seem to use.
def reset_checkpointed_activations_memory_buffer():
"""Reset the memory used for checkpointing."""
if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None:
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER.reset()
def _set_cuda_rng_state(new_state, device=-1):
"""Sets the random number generator state of the current GPU.
Arguments:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
with a single change: the input state is not cloned. Cloning caused
major performance issues for +4 GPU cases.
"""
if hasattr(_C, "_cuda_setRNGState") and callable(_C._cuda_setRNGState):
# older PyTorch
def cb():
with device_ctx_manager(device):
_C._cuda_setRNGState(new_state)
else:
# newer PyTorch
if device == -1:
device = torch.device("cuda")
elif isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device("cuda", device)
def cb():
idx = device.index
if idx is None:
idx = torch.cuda.current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.set_state(new_state)
_lazy_call(cb)
class CudaRNGStatesTracker:
"""Tracker for the cuda RNG states.
Using the `add` method, a cuda rng state is initialized based on
the input `seed` and is assigned to `name`. Later, by forking the
rng state, we can perform operations and return to our starting
cuda state.
"""
def __init__(self):
# Map from a string name to the cuda rng state.
self.states_ = {}
        # Seeds are just for bookkeeping and to ensure no seed is set twice.
self.seeds_ = set()
def reset(self):
"""Set to the initial state (no tracker)."""
self.states_ = {}
self.seeds_ = set()
def get_states(self):
"""Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary."""
states = {}
for name in self.states_:
states[name] = self.states_[name]
return states
def set_states(self, states):
"""Set the rng states. For efficiency purposes, we do not check
the size of seed for compatibility."""
self.states_ = states
def add(self, name, seed):
"""Track the rng state."""
# Check seed is not already used.
if seed in self.seeds_:
raise Exception("seed {} already exists".format(seed))
self.seeds_.add(seed)
# Check that state is not already defined.
if name in self.states_:
raise Exception("cuda rng state {} already exists".format(name))
# Get the current rng state.
orig_rng_state = torch.cuda.get_rng_state()
# Set the new state and store it.
torch.cuda.manual_seed(seed)
self.states_[name] = torch.cuda.get_rng_state()
# Reset rng state to what it was.
_set_cuda_rng_state(orig_rng_state)
@contextlib.contextmanager
def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
"""Fork the cuda rng state, perform operations, and exit with
the original state."""
# Check if we have added the state
if name not in self.states_:
raise Exception("cuda rng state {} is not added".format(name))
# Store current rng state.
orig_cuda_rng_state = torch.cuda.get_rng_state()
# Set rng state to the desired one
_set_cuda_rng_state(self.states_[name])
# Do the stuff we wanted to do.
try:
yield
finally:
# Update the current rng state for later use.
self.states_[name] = torch.cuda.get_rng_state()
# And set the state to the original state we started with.
_set_cuda_rng_state(orig_cuda_rng_state)
# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_cuda_rng_tracker():
"""Get cuda rng tracker."""
return _CUDA_RNG_STATE_TRACKER
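# Illustrative sketch (a hypothetical helper, not part of the original module): forking
# a named state runs kernels on the tracked RNG stream and restores the default CUDA
# RNG state on exit. Requires a CUDA device.
def _example_cuda_rng_tracker_usage():
    tracker = get_cuda_rng_tracker()
    tracker.reset()
    tracker.add("example-stream", seed=1234)
    state_before = torch.cuda.get_rng_state()
    with tracker.fork("example-stream"):
        sampled = torch.randn(2, device="cuda")  # drawn from the tracked stream
    # The default CUDA RNG stream is left exactly where it was.
    assert torch.equal(state_before, torch.cuda.get_rng_state())
    return sampled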
def model_parallel_cuda_manual_seed(seed):
"""Initialize model parallel cuda seed.
This function should be called after the model parallel is
initialized. Also, no torch.cuda.manual_seed should be called
    after this function. Basically, this is a replacement for that
function.
    Two sets of RNG states are tracked:
default state: This is for data parallelism and is the same among a
set of model parallel GPUs but different across
                   different model parallel groups. This is used for
example for dropout in the non-tensor-model-parallel regions.
tensor-model-parallel state: This state is different among a set of model
parallel GPUs, but the same across data parallel
groups. This is used for example for dropout in
model parallel regions.
"""
# 2718 is just for fun and any POSITIVE value will work.
offset = seed + 2718
tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank()
# Data parallel gets the original seed.
data_parallel_seed = seed
_CUDA_RNG_STATE_TRACKER.reset()
# Set the default state.
torch.cuda.manual_seed(data_parallel_seed)
# and model parallel state.
_CUDA_RNG_STATE_TRACKER.add(
_MODEL_PARALLEL_RNG_TRACKER_NAME, tensor_model_parallel_seed
)
# TODO (mkozuki): Move the below gradient checkpoint related features to another (new) file.
class CheckpointFunction(torch.autograd.Function):
"""This function is adapted from torch.utils.checkpoint with
two main changes:
1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
2) the states in the model parallel tracker are also properly
tracked/set/reset.
"""
@staticmethod
def forward(ctx, run_function, distribute_saved_activations, *args):
ctx.run_function = run_function
ctx.distribute_saved_activations = distribute_saved_activations
# Copy the rng states.
ctx.fwd_cpu_rng_state = torch.get_rng_state()
ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
with torch.no_grad():
outputs = run_function(*args)
# Divide hidden states across model parallel group and only keep
# the chunk corresponding to the current rank.
if ctx.distribute_saved_activations:
ctx.input_0_shape = args[0].shape
# Store everything.
ctx.save_for_backward(*args)
return outputs
@staticmethod
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"Checkpointing is not compatible with .grad(), "
"please use .backward() if possible"
)
inputs = ctx.saved_tensors
# Store the current states.
bwd_cpu_rng_state = torch.get_rng_state()
bwd_cuda_rng_state = torch.cuda.get_rng_state()
bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
# Set the states to what it used to be before the forward pass.
torch.set_rng_state(ctx.fwd_cpu_rng_state)
_set_cuda_rng_state(ctx.fwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
# Compute the forward pass.
detached_inputs = detach_variable(inputs)
with torch.enable_grad():
outputs = ctx.run_function(*detached_inputs)
# Set the states back to what it was at the start of this function.
torch.set_rng_state(bwd_cpu_rng_state)
_set_cuda_rng_state(bwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
if isinstance(outputs, torch.Tensor):
outputs = (outputs,)
torch.autograd.backward(outputs, args)
grads = tuple(
inp.grad if isinstance(inp, torch.Tensor) else inp
for inp in detached_inputs
)
return (None, None) + grads
# NOTE(mkozuki): It doesn't look like `distribute_saved_activations` is used in apex.transformer
# but I added this change to reduce the superficial difference from Megatron-LM.
def checkpoint(function, distribute_saved_activations, *args):
"""Checkpoint a model or part of the model.
This has been directly copied from torch.utils.checkpoint."""
return CheckpointFunction.apply(function, distribute_saved_activations, *args)
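# Minimal sketch of `checkpoint` (requires a CUDA device because the forward pass
# snapshots the CUDA RNG state). Dropout inside `run_function` stays consistent
# between the original forward and the recomputation in backward.
if __name__ == "__main__":
    linear = torch.nn.Linear(16, 16).cuda()

    def run_function(x):
        return torch.nn.functional.dropout(linear(x), p=0.1)

    x = torch.randn(4, 16, device="cuda", requires_grad=True)
    out = checkpoint(run_function, False, x)
    out.sum().backward()
    print(x.grad.shape)  # gradients flow through the recomputed forward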
| GeneSplice-main | GeneSplice/apex/apex/transformer/tensor_parallel/random.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Sequence
import torch
from apex.transformer.utils import divide
def split_tensor_along_last_dim(
tensor: torch.Tensor,
num_partitions: int,
contiguous_split_chunks: bool = False,
) -> List[torch.Tensor]:
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
last_dim_size = divide(tensor.size()[last_dim], num_partitions)
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
class VocabUtility:
"""Split the vocabulary into `world_size` chunks and return the
first and last index of the vocabulary belonging to the `rank`
    partition: Note that indices are in [first, last)"""
@staticmethod
def vocab_range_from_per_partition_vocab_size(
per_partition_vocab_size: int, rank, world_size: int
) -> Sequence[int]:
index_f = rank * per_partition_vocab_size
index_l = index_f + per_partition_vocab_size
return index_f, index_l
@staticmethod
def vocab_range_from_global_vocab_size(global_vocab_size: int, rank: int, world_size: int) -> Sequence[int]:
per_partition_vocab_size = divide(global_vocab_size, world_size)
return VocabUtility.vocab_range_from_per_partition_vocab_size(
per_partition_vocab_size, rank, world_size
)
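# Minimal CPU-only sketch of the two helpers above, with made-up sizes; it assumes the
# apex import at the top of this file resolves.
if __name__ == "__main__":
    chunks = split_tensor_along_last_dim(torch.arange(12).reshape(2, 6), 3)
    assert len(chunks) == 3 and all(c.shape == (2, 2) for c in chunks)
    # A 50k vocabulary sharded over 4 tensor-parallel ranks: rank 1 owns [12500, 25000).
    first, last = VocabUtility.vocab_range_from_global_vocab_size(50000, rank=1, world_size=4)
    assert (first, last) == (12500, 25000)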
| GeneSplice-main | GeneSplice/apex/apex/transformer/tensor_parallel/utils.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
from typing import Optional, Dict, Tuple, List
import warnings
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from apex._autocast_utils import _cast_if_autocast_enabled
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.transformer.utils import divide
from apex.transformer.tensor_parallel.mappings import (
copy_to_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
gather_from_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
reduce_from_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
scatter_to_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
reduce_scatter_to_sequence_parallel_region,
)
from apex.transformer.tensor_parallel.random import get_cuda_rng_tracker
from apex.transformer.tensor_parallel.utils import VocabUtility
from apex.transformer.log_util import get_transformer_logger
# `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for
# `_all_gather_base` and `_reduce_scatter_base`. They require the most recent
# version of PyTorch. The following 4 lines are for backward compatibility with
# older PyTorch.
if "reduce_scatter_tensor" not in dir(torch.distributed):
torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base
if "all_gather_into_tensor" not in dir(torch.distributed):
torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
_logger = get_transformer_logger(__name__)
_grad_accum_fusion_available = True
try:
import fused_weight_gradient_mlp_cuda
except ImportError:
_grad_accum_fusion_available = False
_MODEL_PARALLEL_ATTRIBUTE_DEFAULTS = {
"tensor_model_parallel": False,
"partition_dim": -1,
"partition_stride": 1,
}
def param_is_not_tensor_parallel_duplicate(param: torch.Tensor) -> bool:
return (
hasattr(param, "tensor_model_parallel") and param.tensor_model_parallel
) or (get_tensor_model_parallel_rank() == 0)
def set_tensor_model_parallel_attributes(tensor: torch.Tensor, is_parallel: bool, dim: int, stride: int) -> None:
# Make sure the attributes are not set.
for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
assert not hasattr(tensor, attribute)
# Set the attributes.
setattr(tensor, "tensor_model_parallel", is_parallel)
setattr(tensor, "partition_dim", dim)
setattr(tensor, "partition_stride", stride)
def set_defaults_if_not_set_tensor_model_parallel_attributes(tensor: torch.Tensor) -> None:
def maybe_set(attribute, value):
if not hasattr(tensor, attribute):
setattr(tensor, attribute, value)
for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
maybe_set(attribute, _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS[attribute])
def copy_tensor_model_parallel_attributes(destination_tensor: torch.Tensor, source_tensor: torch.Tensor) -> None:
def maybe_copy(attribute):
if hasattr(source_tensor, attribute):
setattr(destination_tensor, attribute, getattr(source_tensor, attribute))
for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
maybe_copy(attribute)
def _initialize_affine_weight_gpu(weight, init_method, partition_dim, stride=1):
"""Initialize affine weight for model parallel on GPU.
Args:
weight (Parameter):
init_method (Callable[[Tensor], None]): Taking a Tensor and initialize its elements.
partition_dim (int): Dimension to apply partition.
stride (int):
"""
set_tensor_model_parallel_attributes(
tensor=weight, is_parallel=True, dim=partition_dim, stride=stride
)
with get_cuda_rng_tracker().fork():
init_method(weight)
# TODO (mkozuki): Re-consider removing params_dtype from arguments to make this
# more parallel with _initialize_affine_weight_gpu
def _initialize_affine_weight_cpu(
weight,
output_size,
input_size,
per_partition_size,
partition_dim,
init_method,
stride=1,
return_master_weight=False,
*,
params_dtype=torch.float32,
):
"""Initialize affine weight for model parallel.
Build the master weight on all processes and scatter
the relevant chunk."""
set_tensor_model_parallel_attributes(
tensor=weight, is_parallel=True, dim=partition_dim, stride=stride
)
# Initialize master weight
master_weight = torch.empty(
output_size, input_size, dtype=torch.float, requires_grad=False
)
init_method(master_weight)
master_weight = master_weight.to(dtype=params_dtype)
# Split and copy
per_partition_per_stride_size = divide(per_partition_size, stride)
weight_list = torch.split(
master_weight, per_partition_per_stride_size, dim=partition_dim
)
rank = get_tensor_model_parallel_rank()
world_size = get_tensor_model_parallel_world_size()
my_weight_list = weight_list[rank::world_size]
with torch.no_grad():
torch.cat(my_weight_list, dim=partition_dim, out=weight)
if return_master_weight:
return master_weight
return None
class VocabParallelEmbedding(torch.nn.Module):
"""Embedding parallelized in the vocabulary dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
init_method=init.xavier_normal_,
*,
params_dtype: torch.dtype=torch.float32,
use_cpu_initialization: bool = False,
):
super().__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
        # Set the defaults for compatibility.
self.padding_idx = None
self.max_norm = None
self.norm_type = 2.0
self.scale_grad_by_freq = False
self.sparse = False
self._weight = None
self.tensor_model_parallel_size = get_tensor_model_parallel_world_size()
# Divide the weight matrix along the vocabulary dimension.
(
self.vocab_start_index,
self.vocab_end_index,
) = VocabUtility.vocab_range_from_global_vocab_size(
self.num_embeddings,
get_tensor_model_parallel_rank(),
self.tensor_model_parallel_size,
)
self.num_embeddings_per_partition = (
self.vocab_end_index - self.vocab_start_index
)
# Allocate weights and initialize.
if use_cpu_initialization:
self.weight = Parameter(
torch.empty(
self.num_embeddings_per_partition,
self.embedding_dim,
dtype=params_dtype,
)
)
_initialize_affine_weight_cpu(
self.weight,
self.num_embeddings,
self.embedding_dim,
self.num_embeddings_per_partition,
0,
init_method,
params_dtype=params_dtype,
)
else:
self.weight = Parameter(
torch.empty(
self.num_embeddings_per_partition,
self.embedding_dim,
device=torch.cuda.current_device(),
dtype=params_dtype,
)
)
_initialize_affine_weight_gpu(
self.weight, init_method, partition_dim=0, stride=1
)
def forward(self, input_):
if self.tensor_model_parallel_size > 1:
# Build the mask.
input_mask = (input_ < self.vocab_start_index) | (
input_ >= self.vocab_end_index
)
# Mask the input.
masked_input = input_.clone() - self.vocab_start_index
masked_input[input_mask] = 0
else:
masked_input = input_
# Get the embeddings.
output_parallel = F.embedding(
masked_input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
# Mask the output embedding.
if self.tensor_model_parallel_size > 1:
output_parallel[input_mask, :] = 0.0
# Reduce across all the model parallel GPUs.
output = reduce_from_tensor_model_parallel_region(output_parallel)
return output
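# Plain-torch sketch (a hypothetical helper, not part of the original module) of the
# masking trick in `VocabParallelEmbedding.forward`, simulating two tensor-parallel
# ranks in one process; the final all-reduce is replaced by a local sum.
def _example_vocab_parallel_embedding_lookup():
    vocab, dim, world = 8, 4, 2
    full_weight = torch.randn(vocab, dim)
    tokens = torch.tensor([0, 3, 5, 7])
    partials = []
    for rank in range(world):
        start, end = rank * vocab // world, (rank + 1) * vocab // world
        shard = full_weight[start:end]             # this rank's slice of the table
        mask = (tokens < start) | (tokens >= end)  # tokens this rank does not own
        local_ids = tokens.clone() - start
        local_ids[mask] = 0
        out = F.embedding(local_ids, shard)
        out[mask, :] = 0.0                         # zero the rows of unowned tokens
        partials.append(out)
    combined = sum(partials)                       # stands in for the all-reduce
    assert torch.allclose(combined, F.embedding(tokens, full_weight))
    return combined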
class LinearWithGradAccumulationAndAsyncCommunication(torch.autograd.Function):
"""Linear layer execution with asynchronous communication and gradient accumulation fusion in backprop."""
@staticmethod
def forward(
ctx,
input: torch.Tensor,
weight: torch.Tensor,
bias: Optional[torch.Tensor],
gradient_accumulation_fusion: bool,
async_grad_allreduce: bool,
sequence_parallel_enabled: bool,
use_16bit_in_wgrad_accum_fusion: Optional[bool] = None,
):
ctx.use_bias = bias is not None and weight.requires_grad
ctx.gradient_accumulation_fusion = gradient_accumulation_fusion
ctx.async_grad_allreduce = async_grad_allreduce
ctx.sequence_parallel_enabled = sequence_parallel_enabled
ctx.compute_weight_gradient = weight.requires_grad
if use_16bit_in_wgrad_accum_fusion is not None:
warnings.warn(
"Deprecated option `use_16bit_in_wgrad_accum_fusion` "
f"is set to {use_16bit_in_wgrad_accum_fusion}"
)
if ctx.compute_weight_gradient:
ctx.save_for_backward(input, weight)
else:
ctx.save_for_backward(weight)
if ctx.sequence_parallel_enabled:
world_size = get_tensor_model_parallel_world_size()
# `input` is supposed to be 3D and its order of dimension is [sequence, batch, hidden]
shape = list(input.shape)
shape[0] *= world_size
all_gather_buffer = torch.empty(
shape,
dtype=input.dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
torch.distributed.all_gather_into_tensor(all_gather_buffer, input, group=get_tensor_model_parallel_group())
total_input = all_gather_buffer
else:
total_input = input
output = torch.matmul(total_input, weight.t())
if bias is not None:
output = output + bias
return output
@staticmethod
def backward(ctx, grad_output):
if ctx.compute_weight_gradient:
input, weight = ctx.saved_tensors
else:
weight = ctx.saved_tensors[0]
input = None
use_bias = ctx.use_bias
        # Only gather the sequence-parallel input if we need to calculate the weight gradient.
handle = None
if ctx.compute_weight_gradient:
if ctx.sequence_parallel_enabled:
world_size = get_tensor_model_parallel_world_size()
shape = list(input.shape)
shape[0] *= world_size
all_gather_buffer = torch.empty(
shape,
dtype=input.dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
handle = torch.distributed.all_gather_into_tensor(
all_gather_buffer,
input,
group=get_tensor_model_parallel_group(),
async_op=True,
)
total_input = all_gather_buffer
else:
total_input = input
grad_input = grad_output.matmul(weight)
if handle is not None:
handle.wait()
if ctx.async_grad_allreduce:
# Asynchronous all-reduce
handle = torch.distributed.all_reduce(
grad_input, group=get_tensor_model_parallel_group(), async_op=True
)
        # If no weight gradient is needed, return immediately.
if not ctx.compute_weight_gradient:
if ctx.sequence_parallel_enabled:
assert not ctx.async_grad_allreduce
world_size = get_tensor_model_parallel_world_size()
shape = list(grad_input.shape)
shape[0] //= world_size
sub_grad_input = torch.empty(torch.Size(shape), dtype=grad_input.dtype, device=torch.cuda.current_device(), requires_grad=False)
handle = torch.distributed.reduce_scatter_tensor(
sub_grad_input,
grad_input,
group=get_tensor_model_parallel_group(),
async_op=True
)
handle.wait()
return sub_grad_input, None, None, None, None, None, None
if ctx.async_grad_allreduce:
handle.wait()
return grad_input, None, None, None, None, None, None
# Convert the tensor shapes to 2D for execution compatibility
grad_output = grad_output.contiguous()
grad_output = grad_output.view(
grad_output.shape[0] * grad_output.shape[1], grad_output.shape[2]
)
total_input = total_input.view(total_input.shape[0] * total_input.shape[1], total_input.shape[2])
if ctx.sequence_parallel_enabled:
assert not ctx.async_grad_allreduce
sub_grad_input = torch.empty(input.shape, dtype=input.dtype, device=torch.cuda.current_device(), requires_grad=False)
handle = torch.distributed.reduce_scatter_tensor(
sub_grad_input,
grad_input,
group=get_tensor_model_parallel_group(),
async_op=True
)
if ctx.gradient_accumulation_fusion:
if not hasattr(weight, "main_grad"):
raise RuntimeError("attempted to perform gradient accumulation fusion on param without setting main_grad")
if weight.main_grad.dtype == torch.float32:
fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(
total_input, grad_output, weight.main_grad
)
elif weight.main_grad.dtype in (torch.float16, torch.bfloat16):
fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(
total_input, grad_output, weight.main_grad
)
else:
raise RuntimeError(f"unsupported dtype for main_grad ({weight.main_grad.dtype})")
grad_weight = None
else:
grad_weight = grad_output.t().matmul(total_input)
grad_bias = grad_output.sum(dim=0) if use_bias else None
if ctx.sequence_parallel_enabled:
handle.wait()
return sub_grad_input, grad_weight, grad_bias, None, None, None, None
if ctx.async_grad_allreduce:
handle.wait()
return grad_input, grad_weight, grad_bias, None, None, None, None
def linear_with_grad_accumulation_and_async_allreduce(
input: torch.Tensor,
weight: torch.Tensor,
bias: Optional[torch.Tensor],
gradient_accumulation_fusion: bool,
async_grad_allreduce: bool,
sequence_parallel_enabled: bool,
) -> torch.Tensor:
args = _cast_if_autocast_enabled(
input,
weight,
bias,
gradient_accumulation_fusion,
async_grad_allreduce,
sequence_parallel_enabled,
)
with torch.cuda.amp.autocast(enabled=False):
return LinearWithGradAccumulationAndAsyncCommunication.apply(*args)
class ColumnParallelLinear(torch.nn.Module):
"""Linear layer with column parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its second dimension as A = [A_1, ..., A_p].
.. note::
Input is supposed to be three dimensional and each dimension
is expected to be sequence, batch, and hidden feature, respectively.
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias
        gather_output: If true, call all-gather on output and make Y available
to all GPUs, otherwise, every GPU will have its output
which is Y_i = XA_i
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
        skip_bias_add: This was added to enable performance optimizations where bias
                       can be fused with other elementwise operations. We skip
adding bias but instead return it.
Keyword Arguments:
no_async_tensor_model_parallel_allreduce:
params_dtype:
use_cpu_initialization:
gradient_accumulation_fusion:
sequence_parallel_enabled:
accumulation_in_fp16: Deprecated
"""
def __init__(
self,
input_size,
output_size,
bias=True,
gather_output=True,
init_method=init.xavier_normal_,
stride=1,
keep_master_weight_for_test=False,
skip_bias_add=False,
*,
no_async_tensor_model_parallel_allreduce=False,
params_dtype=torch.float32,
use_cpu_initialization=False,
gradient_accumulation_fusion=False,
sequence_parallel_enabled: bool = False,
accumulation_in_fp16: Optional[bool] = None,
):
super().__init__()
# Keep input parameters
self.input_size = input_size
self.output_size = output_size
self.gather_output = gather_output
# Divide the weight matrix along the last dimension.
world_size = get_tensor_model_parallel_world_size()
self.output_size_per_partition = divide(output_size, world_size)
self.skip_bias_add = skip_bias_add
if accumulation_in_fp16 is not None:
warnings.warn(
f"Deprecated option `accumulation_in_fp16` is set to {accumulation_in_fp16}"
)
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
# Initialize weight.
if use_cpu_initialization:
self.weight = Parameter(
torch.empty(self.output_size_per_partition, self.input_size, dtype=params_dtype)
)
self.master_weight = _initialize_affine_weight_cpu(
self.weight,
self.output_size,
self.input_size,
self.output_size_per_partition,
0,
init_method,
stride=stride,
return_master_weight=keep_master_weight_for_test,
params_dtype=params_dtype,
)
else:
self.weight = Parameter(
torch.empty(
self.output_size_per_partition,
self.input_size,
device=torch.cuda.current_device(),
dtype=params_dtype,
)
)
_initialize_affine_weight_gpu(self.weight, init_method, partition_dim=0, stride=stride)
if bias:
if use_cpu_initialization:
self.bias = Parameter(torch.empty(self.output_size_per_partition, dtype=params_dtype))
else:
self.bias = Parameter(
torch.empty(
self.output_size_per_partition,
device=torch.cuda.current_device(),
dtype=params_dtype,
)
)
set_tensor_model_parallel_attributes(self.bias, True, 0, stride)
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter("bias", None)
self.async_tensor_model_parallel_allreduce = (
not no_async_tensor_model_parallel_allreduce and world_size > 1
)
if sequence_parallel_enabled:
if world_size <= 1:
warnings.warn(
f"`sequence_parallel_enabled` is set to `True`, but got world_size of {world_size}"
)
# sequence_parallel_enabled = False
self.sequence_parallel_enabled = sequence_parallel_enabled
if gradient_accumulation_fusion:
if not _grad_accum_fusion_available:
# Basically, apex.transformer module users are expected to install APEX's
# `--cpp_ext` and `--cuda_ext`. The example installation command is as follows:
                # `pip install --global-option="--cpp_ext" --global-option="--cuda_ext" .`
# at the root of APEX repository.
warnings.warn(
"`gradient_accumulation_fusion` is set to `True` but "
"the custom CUDA extension of `fused_weight_gradient_mlp_cuda` module not "
"found. Thus `gradient_accumulation_fusion` set to `False`. "
"Note that the extension requires CUDA>=11."
)
gradient_accumulation_fusion = False
self.gradient_accumulation_fusion = gradient_accumulation_fusion
if self.async_tensor_model_parallel_allreduce and self.sequence_parallel_enabled:
raise RuntimeError("`async_tensor_model_parallel_allreduce` and `sequence_parallel_enabled` cannot be enabled at the same time.")
self._forward_impl = linear_with_grad_accumulation_and_async_allreduce
def forward(self, input_: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Forward of ColumnParallelLinear
Args:
input_: 3D tensor whose order of dimension is [sequence, batch, hidden]
Returns:
- output
- bias
"""
bias = self.bias if not self.skip_bias_add else None
if self.async_tensor_model_parallel_allreduce or self.sequence_parallel_enabled:
input_parallel = input_
else:
input_parallel = copy_to_tensor_model_parallel_region(input_)
# Matrix multiply.
output_parallel = self._forward_impl(
input=input_parallel,
weight=self.weight,
bias=bias,
gradient_accumulation_fusion=self.gradient_accumulation_fusion,
async_grad_allreduce=self.async_tensor_model_parallel_allreduce,
sequence_parallel_enabled=self.sequence_parallel_enabled,
)
if self.gather_output:
# All-gather across the partitions.
assert not self.sequence_parallel_enabled
output = gather_from_tensor_model_parallel_region(output_parallel)
else:
output = output_parallel
output_bias = self.bias if self.skip_bias_add else None
return output, output_bias
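# Plain-torch sketch (a hypothetical helper, not part of the original module) of the
# column-parallel math in the docstring above: A is split along its output columns,
# each "rank" computes X A_i, and concatenating the partial outputs reproduces the
# full linear layer. Real ranks hold only their own shard and all-gather the outputs.
def _example_column_parallel_math():
    seq, batch, hidden, ffn, world = 3, 2, 8, 16, 4
    x = torch.randn(seq, batch, hidden)
    weight = torch.randn(ffn, hidden)                    # full weight, stored as in nn.Linear
    shards = torch.chunk(weight, world, dim=0)           # A = [A_1, ..., A_p]
    partial_outputs = [x.matmul(w.t()) for w in shards]  # each is [seq, batch, ffn // world]
    gathered = torch.cat(partial_outputs, dim=-1)        # stands in for the all-gather
    assert torch.allclose(gathered, x.matmul(weight.t()), atol=1e-5)
    return gathered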
class RowParallelLinear(torch.nn.Module):
"""Linear layer with row parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its first dimension and X along its second dimension as:
               -   -
              | A_1 |
              | .   |
          A = | .   |        X = [X_1, ..., X_p]
              | .   |
              | A_p |
               -   -
.. note::
Input is supposed to be three dimensional and each dimension
is expected to be sequence, batch, and hidden feature, respectively.
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias. Note that bias is not parallelized.
input_is_parallel: If true, we assume that the input is already
split across the GPUs and we do not split
again.
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
skip_bias_add: This was added to enable performance optimization where bias
can be fused with other elementwise operations. We skip
adding bias but instead return it.
Keyword Arguments:
params_dtype:
use_cpu_initialization:
gradient_accumulation_fusion:
sequence_parallel_enabled:
accumulation_in_fp16: Deprecated
"""
def __init__(
self,
input_size,
output_size,
bias=True,
input_is_parallel=False,
init_method=init.xavier_normal_,
stride=1,
keep_master_weight_for_test=False,
skip_bias_add=False,
*,
params_dtype=torch.float32,
use_cpu_initialization=False,
gradient_accumulation_fusion=False,
sequence_parallel_enabled: bool = False,
accumulation_in_fp16: Optional[bool] = None,
):
super().__init__()
# Keep input parameters
self.input_size = input_size
self.output_size = output_size
self.input_is_parallel = input_is_parallel
# Divide the weight matrix along the last dimension.
world_size = get_tensor_model_parallel_world_size()
self.input_size_per_partition = divide(input_size, world_size)
self.skip_bias_add = skip_bias_add
self.gradient_accumulation_fusion = gradient_accumulation_fusion
self.sequence_parallel_enabled = sequence_parallel_enabled
if self.sequence_parallel_enabled and not self.input_is_parallel:
raise RuntimeError("To enable `sequence_parallel_enabled`, `input_is_parallel` must be `True`")
if accumulation_in_fp16 is not None:
warnings.warn(
f"Deprecated option `accumulation_in_fp16` is set to {accumulation_in_fp16}"
)
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
# Initialize weight.
if use_cpu_initialization:
self.weight = Parameter(
torch.empty(
self.output_size, self.input_size_per_partition, dtype=params_dtype
)
)
self.master_weight = _initialize_affine_weight_cpu(
self.weight,
self.output_size,
self.input_size,
self.input_size_per_partition,
1,
init_method,
stride=stride,
return_master_weight=keep_master_weight_for_test,
params_dtype=params_dtype,
)
else:
self.weight = Parameter(
torch.empty(
self.output_size,
self.input_size_per_partition,
device=torch.cuda.current_device(),
dtype=params_dtype,
)
)
_initialize_affine_weight_gpu(
self.weight, init_method, partition_dim=1, stride=stride
)
if bias:
if use_cpu_initialization:
self.bias = Parameter(torch.empty(self.output_size, dtype=params_dtype))
else:
self.bias = Parameter(
torch.empty(
self.output_size,
device=torch.cuda.current_device(),
dtype=params_dtype,
)
)
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
setattr(self.bias, "sequence_parallel_enabled", sequence_parallel_enabled)
else:
self.register_parameter("bias", None)
self._forward_impl = linear_with_grad_accumulation_and_async_allreduce
def forward(self, input_: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Forward of RowParallelLinear
Args:
input_: 3D tensor whose order of dimension is [sequence, batch, hidden]
Returns:
- output
- bias
"""
# Set up backprop all-reduce.
if self.input_is_parallel:
input_parallel = input_
else:
assert not self.sequence_parallel_enabled
input_parallel = scatter_to_tensor_model_parallel_region(input_)
# Matrix multiply.
output_parallel = self._forward_impl(
input=input_parallel,
weight=self.weight,
bias=None,
gradient_accumulation_fusion=self.gradient_accumulation_fusion,
async_grad_allreduce=False,
sequence_parallel_enabled=False,
)
# All-reduce across all the partitions.
if self.sequence_parallel_enabled:
output_ = reduce_scatter_to_sequence_parallel_region(output_parallel)
else:
output_ = reduce_from_tensor_model_parallel_region(output_parallel)
if not self.skip_bias_add:
output = output_ + self.bias if self.bias is not None else output_
output_bias = None
else:
output = output_
output_bias = self.bias
return output, output_bias
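# Plain-torch sketch (a hypothetical helper, not part of the original module) of the
# row-parallel math in the docstring above: A is split along its input rows, X along
# its last dimension, and summing the partial products X_i A_i reproduces the full
# linear layer. On real ranks the sum is an all-reduce (or a reduce-scatter when
# sequence parallelism is enabled).
def _example_row_parallel_math():
    seq, batch, ffn, hidden, world = 3, 2, 16, 8, 4
    x = torch.randn(seq, batch, ffn)
    weight = torch.randn(hidden, ffn)             # full weight, stored as in nn.Linear
    w_shards = torch.chunk(weight, world, dim=1)  # split A along its rows
    x_shards = torch.chunk(x, world, dim=-1)      # X = [X_1, ..., X_p]
    partial = [xi.matmul(wi.t()) for xi, wi in zip(x_shards, w_shards)]
    reduced = sum(partial)                        # stands in for the all-reduce
    assert torch.allclose(reduced, x.matmul(weight.t()), atol=1e-4)
    return reduced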
| GeneSplice-main | GeneSplice/apex/apex/transformer/tensor_parallel/layers.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.parallel_state import get_tensor_model_parallel_src_rank
_MAX_DATA_DIM = 5
def _check_data_types(keys, data, target_dtype):
"""Check that all the keys have the same target data type."""
for key in keys:
assert data[key].dtype == target_dtype, (
"{} has data type {} which "
"is different than {}".format(key, data[key].dtype, target_dtype)
)
def _build_key_size_numel_dictionaries(keys, data):
"""Build the size on rank 0 and broadcast."""
max_dim = _MAX_DATA_DIM
sizes = [0 for _ in range(max_dim) for _ in keys]
# Pack the sizes on rank zero.
if get_tensor_model_parallel_rank() == 0:
offset = 0
for key in keys:
assert data[key].dim() < max_dim, "you should increase MAX_DATA_DIM"
size = data[key].size()
for i, s in enumerate(size):
sizes[i + offset] = s
offset += max_dim
# Move to GPU and broadcast.
sizes_cuda = torch.cuda.LongTensor(sizes)
torch.distributed.broadcast(
sizes_cuda,
get_tensor_model_parallel_src_rank(),
group=get_tensor_model_parallel_group(),
)
# Move back to cpu and unpack.
sizes_cpu = sizes_cuda.cpu()
key_size = {}
key_numel = {}
total_numel = 0
offset = 0
for key in keys:
i = 0
size = []
numel = 1
while sizes_cpu[offset + i] > 0:
this_size = sizes_cpu[offset + i]
size.append(this_size)
numel *= this_size
i += 1
key_size[key] = size
key_numel[key] = numel
total_numel += numel
offset += max_dim
return key_size, key_numel, total_numel
def broadcast_data(keys, data, datatype):
"""Broadcast data from rank zero of each model parallel group to the
members of the same model parallel group.
Arguments:
        keys: list of keys in the data dictionary to be broadcast
data: data dictionary of string keys and cpu tensor values.
datatype: torch data type of all tensors in data associated
with keys.
"""
# Build (key, size) and (key, number of elements) dictionaries along
# with the total number of elements on all ranks.
key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys, data)
# Pack on rank zero.
if get_tensor_model_parallel_rank() == 0:
# Check that all keys have the same data type.
_check_data_types(keys, data, datatype)
# Flatten the data associated with the keys
flatten_data = torch.cat(
[data[key].contiguous().view(-1) for key in keys], dim=0
).cuda()
else:
flatten_data = torch.empty(
total_numel, device=torch.cuda.current_device(), dtype=datatype
)
# Broadcast
torch.distributed.broadcast(
flatten_data,
get_tensor_model_parallel_src_rank(),
group=get_tensor_model_parallel_group(),
)
# Unpack
output = {}
offset = 0
for key in keys:
size = key_size[key]
numel = key_numel[key]
output[key] = flatten_data.narrow(0, offset, numel).view(size)
offset += numel
return output
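# Single-process sketch (a hypothetical helper, not part of the original module) of the
# pack/unpack scheme used by `broadcast_data`: tensors are flattened and concatenated
# so a single broadcast suffices, then recovered with `narrow` + `view`. The actual
# broadcast is omitted here.
def _example_pack_unpack():
    data = {"tokens": torch.arange(6).reshape(2, 3), "labels": torch.arange(4)}
    flat = torch.cat([v.contiguous().view(-1) for v in data.values()])
    offset, recovered = 0, {}
    for key, value in data.items():
        numel = value.numel()
        recovered[key] = flat.narrow(0, offset, numel).view(value.shape)
        offset += numel
    assert all(torch.equal(recovered[k], data[k]) for k in data)
    return recovered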
| GeneSplice-main | GeneSplice/apex/apex/transformer/tensor_parallel/data.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.tensor_parallel.utils import split_tensor_along_last_dim
# `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for
# `_all_gather_base` and `_reduce_scatter_base`. They require the most recent
# version of PyTorch. The following 4 lines are for backward compatibility with
# older PyTorch.
if "all_gather_into_tensor" not in dir(torch.distributed):
torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
if "reduce_scatter_tensor" not in dir(torch.distributed):
torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base
def _reduce(input_: torch.Tensor) -> torch.Tensor:
"""All-reduce the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if get_tensor_model_parallel_world_size() == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())
return input_
def _split_along_last_dim(input_: torch.Tensor) -> torch.Tensor:
"""Split the tensor along its last dimension and keep the
corresponding slice."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Split along last dimension.
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = get_tensor_model_parallel_rank()
output = input_list[rank].contiguous()
return output
def _split_along_first_dim(input_: torch.Tensor) -> torch.Tensor:
"""Split the tensor along its first dimension and keep the corresponding slice."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU for tensor model parallel.
if world_size == 1:
return input_
# Split along first dimension.
dim_size = input_.size(0)
assert dim_size % world_size == 0
local_dim_size = dim_size // world_size
dim_offset = get_tensor_model_parallel_rank() * local_dim_size
output = input_[dim_offset:dim_offset + local_dim_size].contiguous()
return output
def _gather_along_last_dim(input_: torch.Tensor) -> torch.Tensor:
"""Gather tensors and concatenate along the last dimension."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Size and dimension.
last_dim = input_.dim() - 1
rank = get_tensor_model_parallel_rank()
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
torch.distributed.all_gather(
tensor_list, input_, group=get_tensor_model_parallel_group()
)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output
def _gather_along_first_dim(input_: torch.Tensor) -> torch.Tensor:
"""Gather tensors and concatenate along the first dimension."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
shape = list(input_.shape)
shape[0] *= world_size
output = torch.empty(shape, dtype=input_.dtype, device=torch.cuda.current_device())
torch.distributed.all_gather_into_tensor(
output,
input_.contiguous(),
group=get_tensor_model_parallel_group()
)
return output
def _reduce_scatter_along_first_dim(input_: torch.Tensor) -> torch.Tensor:
"""Reduce-scatter the input tensor across model parallel group."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
shape = list(input_.shape)
assert shape[0] % world_size == 0
shape[0] //= world_size
output = torch.empty(shape, dtype=input_.dtype, device=torch.cuda.current_device())
torch.distributed.reduce_scatter_tensor(
output,
input_.contiguous(),
group=get_tensor_model_parallel_group()
)
return output
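# Illustrative sketch (added for documentation; not part of the original file):
# the first-dimension gather and reduce-scatter above are shape-inverses of each
# other, which is what the sequence-parallel autograd functions below rely on.
# Assumes tensor model parallelism is initialized and that `t` lives on the
# current CUDA device with a first dimension divisible by the world size.
def _sequence_parallel_shape_example(t: torch.Tensor) -> None:
    gathered = _gather_along_first_dim(t)  # dim 0 multiplied by the world size
    scattered = _reduce_scatter_along_first_dim(gathered)  # dim 0 divided back
    assert scattered.shape == t.shape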
class _CopyToModelParallelRegion(torch.autograd.Function):
"""Pass the input to the tensor model parallel region."""
# FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
# https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
@staticmethod
def symbolic(graph, input_):
return input_
@staticmethod
def forward(ctx, input_):
return input_
@staticmethod
def backward(ctx, grad_output):
return _reduce(grad_output)
class _ReduceFromModelParallelRegion(torch.autograd.Function):
"""All-reduce the input from the tensor model parallel region."""
# FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
# https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
@staticmethod
def symbolic(graph, input_):
return _reduce(input_)
@staticmethod
def forward(ctx, input_):
return _reduce(input_)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class _ScatterToModelParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chuck to the rank."""
# FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
# https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
@staticmethod
def symbolic(graph, input_):
return _split_along_last_dim(input_)
@staticmethod
def forward(ctx, input_):
return _split_along_last_dim(input_)
@staticmethod
def backward(ctx, grad_output):
return _gather_along_last_dim(grad_output)
class _GatherFromModelParallelRegion(torch.autograd.Function):
"""Gather the input from tensor model parallel region and concatenate."""
# FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
# https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
@staticmethod
def symbolic(graph, input_):
return _gather_along_last_dim(input_)
@staticmethod
def forward(ctx, input_):
return _gather_along_last_dim(input_)
@staticmethod
def backward(ctx, grad_output):
return _split_along_last_dim(grad_output)
class _ScatterToSequenceParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chunk to the rank."""
# FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
# https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
@staticmethod
def symbolic(graph, input_):
return _split_along_first_dim(input_)
@staticmethod
def forward(ctx, input_):
return _split_along_first_dim(input_)
@staticmethod
def backward(ctx, grad_output):
return _gather_along_first_dim(grad_output)
class _GatherFromSequenceParallelRegion(torch.autograd.Function):
"""Gather the input from sequence parallel region and concatenate."""
# FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
# https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
@staticmethod
def symbolic(graph, input_, to_model_parallel: bool = True):
return _gather_along_first_dim(input_)
@staticmethod
def forward(ctx, input_, to_model_parallel: bool = True):
ctx.to_model_parallel = to_model_parallel
return _gather_along_first_dim(input_)
@staticmethod
def backward(ctx, grad_output):
if ctx.to_model_parallel:
return _reduce_scatter_along_first_dim(grad_output), None
else:
return _split_along_first_dim(grad_output), None
class _ReduceScatterToSequenceParallelRegion(torch.autograd.Function):
"""Reduce scatter the input from the sequence parallel region and concatenate."""
# FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
# https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
@staticmethod
def symbolic(graph, input_):
return _reduce_scatter_along_first_dim(input_)
@staticmethod
def forward(ctx, input_):
return _reduce_scatter_along_first_dim(input_)
@staticmethod
def backward(ctx, grad_output):
return _gather_along_first_dim(grad_output)
# -----------------
# Helper functions.
# -----------------
def copy_to_tensor_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _CopyToModelParallelRegion.apply(input_)
def reduce_from_tensor_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _ReduceFromModelParallelRegion.apply(input_)
def scatter_to_tensor_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _ScatterToModelParallelRegion.apply(input_)
def gather_from_tensor_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _GatherFromModelParallelRegion.apply(input_)
def scatter_to_sequence_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _ScatterToSequenceParallelRegion.apply(input_)
def gather_from_sequence_parallel_region(input_: torch.Tensor, to_model_parallel: bool = True) -> torch.Tensor:
return _GatherFromSequenceParallelRegion.apply(input_, to_model_parallel)
def reduce_scatter_to_sequence_parallel_region(input_: torch.Tensor) -> torch.Tensor:
return _ReduceScatterToSequenceParallelRegion.apply(input_)
__all__ = [
"copy_to_tensor_model_parallel_region",
"reduce_from_tensor_model_parallel_region",
"scatter_to_tensor_model_parallel_region",
"gather_from_tensor_model_parallel_region",
"scatter_to_sequence_parallel_region",
"gather_from_sequence_parallel_region",
"reduce_scatter_to_sequence_parallel_region",
]
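# Illustrative sketch (added for documentation; not part of the original file):
# how these region helpers are typically composed in a Megatron-style
# column-parallel linear layer. `weight` holds this rank's shard of the output
# features; the names and shapes are made up, and tensor model parallelism is
# assumed to be initialized.
def _column_parallel_linear_example(input_: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    # Forward: identity; backward: all-reduce of the input gradient.
    parallel_input = copy_to_tensor_model_parallel_region(input_)
    # Each rank computes its shard of the output features.
    partial_output = torch.nn.functional.linear(parallel_input, weight)
    # Concatenate the per-rank shards along the last (feature) dimension.
    return gather_from_tensor_model_parallel_region(partial_output)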
| GeneSplice-main | GeneSplice/apex/apex/transformer/tensor_parallel/mappings.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
from apex.transformer.layers.layer_norm import FastLayerNorm
from apex.transformer.layers.layer_norm import FusedLayerNorm
from apex.transformer.layers.layer_norm import MixedFusedLayerNorm
__all__ = [
"FastLayerNorm",
"FusedLayerNorm",
"MixedFusedLayerNorm",
]
| GeneSplice-main | GeneSplice/apex/apex/transformer/layers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# NOTE(mkozuki): This file defines LayerNorm variants that are compatible with Megatron-LM,
# while avoiding introducing the breaking change of the `"sequence_parallel_enabled"` attribute into apex.normalization.FusedLayerNorm
# and apex.contrib.layer_norm.FastLayerNorm.
import warnings
import torch
from apex.normalization import FusedLayerNorm as OrigFusedLayerNorm
from apex.normalization import MixedFusedLayerNorm as OrigMixedFusedLayerNorm
try:
from apex.contrib.layer_norm import FastLayerNorm as OrigFastLayerNorm
except ImportError:
HAS_FAST_LAYER_NORM = False
else:
HAS_FAST_LAYER_NORM = True
__all__ = [
"FusedLayerNorm",
"FastLayerNorm",
"MixedFusedLayerNorm",
]
def _set_sequence_parallel_enabled(
param: torch.Tensor,
sequence_parallel_enabled: bool,
) -> None:
setattr(param, "sequence_parallel_enabled", sequence_parallel_enabled)
class FusedLayerNorm(OrigFusedLayerNorm):
def __init__(
self,
normalized_shape,
eps: float = 1e-5,
elementwise_affine: bool = True,
*,
sequence_parallel_enabled: bool = False,
):
super().__init__(
normalized_shape=normalized_shape,
eps=eps,
elementwise_affine=elementwise_affine,
)
self.sequence_parallel_enabled = sequence_parallel_enabled
if self.elementwise_affine:
_set_sequence_parallel_enabled(self.weight, self.sequence_parallel_enabled)
_set_sequence_parallel_enabled(self.bias, self.sequence_parallel_enabled)
# note: MixedFusedLayerNorm is no different from FusedLayerNorm if it's used in `torch.cuda.amp`.
class MixedFusedLayerNorm(OrigMixedFusedLayerNorm):
def __init__(
self,
normalized_shape,
eps: float = 1e-5,
**kwargs,
) -> None:
self.sequence_parallel_enabled = kwargs.get("sequence_parallel_enabled", False)
super().__init__(normalized_shape=normalized_shape, eps=eps, **kwargs)
if self.sequence_parallel_enabled:
_set_sequence_parallel_enabled(self.weight, self.sequence_parallel_enabled)
_set_sequence_parallel_enabled(self.bias, self.sequence_parallel_enabled)
if HAS_FAST_LAYER_NORM:
class FastLayerNorm(OrigFastLayerNorm):
def __init__(
self,
hidden_size,
eps: float = 1e-5,
*,
sequence_parallel_enabled: bool = False,
):
super().__init__(
hidden_size=hidden_size,
eps=eps
)
self.sequence_parallel_enabled = sequence_parallel_enabled
_set_sequence_parallel_enabled(self.weight, self.sequence_parallel_enabled)
_set_sequence_parallel_enabled(self.bias, self.sequence_parallel_enabled)
else:
class FastLayerNorm(FusedLayerNorm):
def __init__(
self,
hidden_size,
eps: float = 1e-5,
*,
sequence_parallel_enabled: bool = False,
):
            warnings.warn("`apex.contrib.layer_norm.FastLayerNorm` isn't available; falling back to `apex.normalization.FusedLayerNorm`")
super().__init__(
normalized_shape=hidden_size,
eps=eps,
elementwise_affine=True,
sequence_parallel_enabled=sequence_parallel_enabled,
)
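# Illustrative sketch (added for documentation; not part of the original file):
# constructing the Megatron-compatible layer norm and checking that its
# parameters are tagged for sequence parallelism. The hidden size (1024) is an
# arbitrary example value; CUDA and the apex fused extensions are assumed to be
# available.
def _sequence_parallel_layer_norm_example():
    ln = FusedLayerNorm(1024, sequence_parallel_enabled=True)
    # Both parameters carry the attribute that Megatron-style grad-reduction
    # hooks look for.
    assert getattr(ln.weight, "sequence_parallel_enabled", False)
    assert getattr(ln.bias, "sequence_parallel_enabled", False)
    return ln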
| GeneSplice-main | GeneSplice/apex/apex/transformer/layers/layer_norm.py |
import time
import torch
class _Timer:
"""Timer."""
def __init__(self, name):
self.name_ = name
self.elapsed_ = 0.0
self.started_ = False
self.start_time = time.time()
def start(self):
"""Start the timer."""
assert not self.started_, "timer has already been started"
torch.cuda.synchronize()
self.start_time = time.time()
self.started_ = True
def stop(self):
"""Stop the timer."""
assert self.started_, "timer is not started"
torch.cuda.synchronize()
self.elapsed_ += time.time() - self.start_time
self.started_ = False
def reset(self):
"""Reset timer."""
self.elapsed_ = 0.0
self.started_ = False
def elapsed(self, reset=True):
"""Calculate the elapsed time."""
started_ = self.started_
        # If timing is in progress, stop it first.
if self.started_:
self.stop()
# Get the elapsed time.
elapsed_ = self.elapsed_
# Reset the elapsed time
if reset:
self.reset()
# If timing was in progress, set it back.
if started_:
self.start()
return elapsed_
class _Timers:
"""Group of timers."""
def __init__(self):
self.timers = {}
def __call__(self, name):
if name not in self.timers:
self.timers[name] = _Timer(name)
return self.timers[name]
def write(self, names, writer, iteration, normalizer=1.0, reset=False):
"""Write timers to a tensorboard writer"""
        # Currently, when using add_scalars,
        # torch.utils.tensorboard's add_scalars makes each timer its own run, which
        # pollutes the runs list, so we just add each timer as a scalar.
assert normalizer > 0.0
for name in names:
value = self.timers[name].elapsed(reset=reset) / normalizer
writer.add_scalar(name + "-time", value, iteration)
def log(self, names, normalizer=1.0, reset=True):
"""Log a group of timers."""
assert normalizer > 0.0
string = "time (ms)"
for name in names:
elapsed_time = self.timers[name].elapsed(reset=reset) * 1000.0 / normalizer
string += " | {}: {:.2f}".format(name, elapsed_time)
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1):
print(string, flush=True)
else:
print(string, flush=True)
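# Illustrative usage sketch (added for documentation; not part of the original
# file). Assumes a CUDA device is available, since `_Timer` calls
# `torch.cuda.synchronize()`; the timer name "train-step" is arbitrary.
def _timers_example():
    timers = _Timers()
    timers("train-step").start()
    torch.cuda.synchronize()  # stand-in for real work
    timers("train-step").stop()
    # Prints e.g. "time (ms) | train-step: 0.42" on the last rank (or always,
    # when torch.distributed is not initialized) and resets the timer.
    timers.log(["train-step"])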
| GeneSplice-main | GeneSplice/apex/apex/transformer/pipeline_parallel/_timers.py |
from apex.transformer.pipeline_parallel.schedules import get_forward_backward_func
from apex.transformer.pipeline_parallel.schedules.common import build_model
__all__ = [
"get_forward_backward_func",
"build_model",
]
| GeneSplice-main | GeneSplice/apex/apex/transformer/pipeline_parallel/__init__.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for pipeline model parallel."""
from typing import Optional, List, Union, Tuple
import torch
from torch.nn.parallel import DistributedDataParallel
from apex.multi_tensor_apply import multi_tensor_applier
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.microbatches import build_num_microbatches_calculator
from apex.transformer.pipeline_parallel._timers import _Timers
if multi_tensor_applier.available:
import amp_C
_GLOBAL_ARGS = None
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
_GLOBAL_TOKENIZER = None
_GLOBAL_TENSORBOARD_WRITER = None
_GLOBAL_AUTORESUME = None
_GLOBAL_TIMERS = None
Shape = Union[List[int], torch.Size]
def listify_model(model: Union[torch.nn.Module, List[torch.nn.Module]]) -> List[torch.nn.Module]:
if isinstance(model, list):
return model
return [model]
def _ensure_var_is_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is not None, "{} is not initialized.".format(name)
def _ensure_var_is_not_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is None, "{} is already initialized.".format(name)
def setup_microbatch_calculator(
rank: int,
rampup_batch_size: Optional[List[int]],
global_batch_size: int,
micro_batch_size: int,
data_parallel_size: int,
) -> None:
global _GLOBAL_NUM_MICROBATCHES_CALCULATOR
_ensure_var_is_not_initialized(_GLOBAL_NUM_MICROBATCHES_CALCULATOR, 'num microbatches calculator')
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = build_num_microbatches_calculator(
rank, rampup_batch_size, global_batch_size, micro_batch_size, data_parallel_size)
def _reconfigure_microbatch_calculator(
rank: int,
rampup_batch_size: Optional[List[int]],
global_batch_size: int,
micro_batch_size: int,
data_parallel_size: int,
) -> None:
if torch.distributed.get_rank() == 0:
import warnings
warnings.warn("This function is only for unittest")
global _GLOBAL_NUM_MICROBATCHES_CALCULATOR
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = build_num_microbatches_calculator(
rank, rampup_batch_size, global_batch_size, micro_batch_size, data_parallel_size)
def get_micro_batch_size():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.micro_batch_size
def get_num_microbatches():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get()
def get_current_global_batch_size():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size()
def update_num_microbatches(consumed_samples, consistency_check=True):
_GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples, consistency_check)
# note (mkozuki): Kept for reference; prefer `get_kth_microbatch`.
def _split_batch_into_microbatch(
batch: List[torch.Tensor],
*,
_micro_batch_size: Optional[int] = None,
_global_batch_size: Optional[int] = None,
) -> List[List[torch.Tensor]]:
micro_batch_size = _micro_batch_size
global_batch_size = _global_batch_size
if micro_batch_size is None:
micro_batch_size = get_micro_batch_size()
if global_batch_size is None:
global_batch_size = get_current_global_batch_size()
    for i in range(0, global_batch_size, micro_batch_size):
        # `i` already is the sample offset, so slice by it directly.
        yield [x[i:i + micro_batch_size] for x in batch]
# TODO(mkozuki): Support non-tensor local minibatches?
def get_kth_microbatch(batch: Optional[List[torch.Tensor]], k: int) -> List[torch.Tensor]:
"""Create a list of microbatches from a list of local minibatches.
This function creates a list of `k`th microbatches from a list of local minibatches.
`a local minibatch` consists of `global_batch_size / data_parallel_size` samples.
"""
if batch is None or not isinstance(batch, (List, Tuple)):
return batch
micro_batch_size = get_micro_batch_size()
start = k * micro_batch_size
end = start + micro_batch_size
microbatch = list()
for x in batch:
size = x.size(0)
assert size > start and size >= end
microbatch.append(x[start:end])
assert len(microbatch) > 0
return microbatch
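# Illustrative sketch (added for documentation; not part of the original file):
# iterating over all microbatches of a local minibatch with `get_kth_microbatch`.
# Assumes `setup_microbatch_calculator` has already been called so that
# `get_num_microbatches()` and the micro batch size are defined.
def _get_kth_microbatch_example(local_minibatch: List[torch.Tensor]) -> List[List[torch.Tensor]]:
    microbatches = []
    for k in range(get_num_microbatches()):
        # Each element is the k-th slice of `micro_batch_size` samples taken
        # from every tensor in the local minibatch.
        microbatches.append(get_kth_microbatch(local_minibatch, k))
    return microbatches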
def get_autoresume():
return _GLOBAL_AUTORESUME
def _set_timers():
"""Initialize timers."""
global _GLOBAL_TIMERS
_ensure_var_is_not_initialized(_GLOBAL_TIMERS, "timers")
_GLOBAL_TIMERS = _Timers()
def get_timers():
"""Return timers."""
_ensure_var_is_initialized(_GLOBAL_TIMERS, "timers")
return _GLOBAL_TIMERS
def print_rank_0(message: str) -> None:
"""If distributed is initialized, print only on rank 0."""
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print(message, flush=True)
else:
print(message, flush=True)
def is_last_rank():
return torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1)
def print_rank_last(message):
"""If distributed is initialized, print only on last rank."""
if torch.distributed.is_initialized():
if is_last_rank():
print(message, flush=True)
else:
print(message, flush=True)
def param_is_not_shared(param: torch.nn.Parameter) -> bool:
return getattr(param, "shared", False)
def unwrap_model(model, module_instances=(DistributedDataParallel,)):
return_list = True
if not isinstance(model, list):
model = [model]
return_list = False
unwrapped_model = []
for model_module in model:
while isinstance(model_module, module_instances):
model_module = model_module.module
unwrapped_model.append(model_module)
if not return_list:
return unwrapped_model[0]
return unwrapped_model
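# Illustrative sketch (added for documentation; not part of the original file):
# `unwrap_model` strips (possibly nested) wrapper modules. The custom wrapper
# below is a stand-in for DistributedDataParallel so the example does not need
# an initialized process group.
def _unwrap_model_example():
    class _Wrapper(torch.nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module
    core = torch.nn.Linear(4, 4)
    wrapped = _Wrapper(_Wrapper(core))
    assert unwrap_model(wrapped, module_instances=(_Wrapper,)) is core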
def get_model_type(
model: torch.nn.Module,
) -> ModelType:
"""Get `model_type` of `model`.
If ``model`` doesn't have ``model_type`` attribute, return ``ModelType.encoder_or_decoder``.
Args:
model
"""
return getattr(unwrap_model(model), "model_type", ModelType.encoder_or_decoder)
def calc_params_l2_norm(model: torch.nn.Module, bf16: bool):
"""Calculate l2 norm of parameters """
# args = get_args()
if not isinstance(model, list):
model = [model]
# Remove duplicate params.
params_data = []
for model_ in model:
for param in model_.parameters():
is_not_shared = param_is_not_shared(param)
is_not_tp_duplicate = parallel_state.param_is_not_tensor_parallel_duplicate(param)
if is_not_shared and is_not_tp_duplicate:
if bf16:
params_data.append(param.data.float())
else:
params_data.append(param.data)
# Calculate norm
dummy_overflow_buf = torch.cuda.IntTensor([0])
norm, _ = multi_tensor_applier(
amp_C.multi_tensor_l2norm, dummy_overflow_buf, [params_data], False # no per-parameter norm
)
norm_2 = norm * norm
# Sum across all model-parallel GPUs.
torch.distributed.all_reduce(
norm_2, op=torch.distributed.ReduceOp.SUM, group=parallel_state.get_model_parallel_group()
)
return norm_2.item() ** 0.5
def average_losses_across_data_parallel_group(losses):
"""Reduce a tensor of losses across all GPUs."""
averaged_losses = torch.cat([loss.clone().detach().view(1) for loss in losses])
torch.distributed.all_reduce(averaged_losses, group=parallel_state.get_data_parallel_group())
averaged_losses = averaged_losses / torch.distributed.get_world_size(
group=parallel_state.get_data_parallel_group()
)
return averaged_losses
def report_memory(name):
"""Simple GPU memory report."""
mega_bytes = 1024.0 * 1024.0
string = name + " memory (MB)"
string += " | allocated: {}".format(torch.cuda.memory_allocated() / mega_bytes)
string += " | max allocated: {}".format(torch.cuda.max_memory_allocated() / mega_bytes)
string += " | reserved: {}".format(torch.cuda.memory_reserved() / mega_bytes)
string += " | max reserved: {}".format(torch.cuda.max_memory_reserved() / mega_bytes)
if parallel_state.get_data_parallel_rank() == 0:
print("[Rank {}] {}".format(torch.distributed.get_rank(), string), flush=True)
def print_params_min_max_norm(optimizer, iteration):
"""Print min, max, and norm of all parameters."""
index = 0
rank = torch.distributed.get_rank()
string = "iteration, rank, index, tensor-model-parallel, min, max, norm\n"
optimizer_ = optimizer.optimizer
for param_group in optimizer_.param_groups:
for param in param_group["params"]:
index += 1
min_ = param.data.min()
max_ = param.data.max()
norm = torch.linalg.norm(param.data)
string += "{:7d}, {:4d}, {:4d}, {:2d}, ".format(
iteration, rank, index, int(param.tensor_model_parallel)
)
string += "{:.6E}, {:.6E}, {:.6E}\n".format(min_, max_, norm)
print(string, flush=True)
# NOTE (mkozuki): APEX doesn't have anything equivalent for
# `_GLOBAL_ADLR_AUTORESUME` like Megatron-LM.
# def check_adlr_autoresume_termination(iteration, model, optimizer, lr_scheduler, save: bool):
# """Check for autoresume signal and exit if it is received."""
# from apex.ppu.checkpointing import save_checkpoint
#
# autoresume = get_adlr_autoresume()
# # Add barrier to ensure consistency.
# torch.distributed.barrier()
# if autoresume.termination_requested():
# if save:
# save_checkpoint(iteration, model, optimizer, lr_scheduler)
# print_rank_0(">>> autoresume termination request found!")
# if torch.distributed.get_rank() == 0:
# autoresume.request_resume()
# print_rank_0(">>> training terminated. Returning")
# sys.exit(0)
def get_ltor_masks_and_position_ids(
data, eod_token, reset_position_ids, reset_attention_mask, eod_mask_loss
):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
micro_batch_size, seq_length = data.size()
# Attention mask (lower triangular).
if reset_attention_mask:
att_mask_batch = micro_batch_size
else:
att_mask_batch = 1
attention_mask = torch.tril(
torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
).view(att_mask_batch, 1, seq_length, seq_length)
# Loss mask.
loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
if eod_mask_loss:
loss_mask[data == eod_token] = 0.0
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data)
    # We need to clone as the ids will be modified based on batch index.
if reset_position_ids:
position_ids = position_ids.clone()
if reset_position_ids or reset_attention_mask:
# Loop through the batches:
for b in range(micro_batch_size):
            # Find indices where the EOD token is.
eod_index = position_ids[b, data[b] == eod_token]
            # Detach indices from positions if we are going to modify positions.
if reset_position_ids:
eod_index = eod_index.clone()
            # Loop through EOD indices:
prev_index = 0
for j in range(eod_index.size()[0]):
i = eod_index[j]
# Mask attention loss.
if reset_attention_mask:
attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
# Reset positions.
if reset_position_ids:
position_ids[b, (i + 1) :] -= i + 1 - prev_index
prev_index = i + 1
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
return attention_mask, loss_mask, position_ids
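# Illustrative sketch (added for documentation; not part of the original file):
# building left-to-right masks for a tiny batch. The token ids and the EOD id
# (0) are made up; no distributed initialization is needed for this helper.
def _ltor_masks_example():
    tokens = torch.tensor([[5, 7, 0, 9, 4]], dtype=torch.long)
    attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
        tokens,
        eod_token=0,
        reset_position_ids=True,
        reset_attention_mask=True,
        eod_mask_loss=True,
    )
    # attention_mask: [1, 1, 5, 5] boolean mask (True marks masked positions),
    # loss_mask zeroes out the EOD position, and position ids restart after EOD.
    return attention_mask, loss_mask, position_ids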
| GeneSplice-main | GeneSplice/apex/apex/transformer/pipeline_parallel/utils.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(mkozuki): Consider removing `timers`.
from functools import reduce
import operator
from typing import Union, Optional, Tuple
import torch
from apex.transformer import parallel_state
from apex.transformer.log_util import get_transformer_logger
from apex.transformer.utils import split_tensor_into_1d_equal_chunks
from apex.transformer.utils import gather_split_1d_tensor
from apex.transformer.pipeline_parallel.utils import Shape
from apex.transformer.pipeline_parallel._timers import _Timers
_logger = get_transformer_logger(__name__)
class FutureTensor:
def __init__(self, tensor: torch.Tensor, waitfunc):
self.tensor = tensor
self.waitfunc = waitfunc
def get(self):
if self.waitfunc is not None:
res = self.waitfunc()
if isinstance(res, torch.Tensor):
self.tensor = res
self.waitfunc = None
return self.tensor
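# Illustrative sketch (added for documentation; not part of the original file):
# `FutureTensor` simply pairs a pre-allocated tensor with a callable that
# completes the communication. The dummy wait function below stands in for a
# `torch.distributed` request's `wait()`.
def _future_tensor_example() -> torch.Tensor:
    buf = torch.zeros(4)
    def _wait():
        # In real use this would block on an irecv request and optionally
        # return a post-processed tensor (see the async paths below).
        return buf + 1
    fut = FutureTensor(buf, _wait)
    result = fut.get()  # runs the wait function once and caches the result
    result = fut.get()  # subsequent calls return the cached tensor
    return result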
def _run_p2pops(
tensor_send_prev: Union[torch.Tensor, None],
tensor_send_next: Union[torch.Tensor, None],
tensor_recv_prev: Union[torch.Tensor, None],
tensor_recv_next: Union[torch.Tensor, None],
async_comm: bool = False,
overlap_p2p_comm: bool = False,
batch_p2p_comm: bool = True,
):
p2p_group = parallel_state.get_pipeline_model_parallel_group()
default_group = parallel_state.get_model_parallel_group()
need_to_sync = p2p_group.name() != default_group.name()
reqs = []
ops = []
if batch_p2p_comm and p2p_group.name() == "nccl":
if tensor_send_prev is not None:
send_prev_op = torch.distributed.P2POp(
op=torch.distributed.isend,
tensor=tensor_send_prev,
peer=parallel_state.get_pipeline_model_parallel_prev_rank(),
group=p2p_group,
)
ops.append(send_prev_op)
if tensor_recv_prev is not None:
recv_prev_op = torch.distributed.P2POp(
op=torch.distributed.irecv,
tensor=tensor_recv_prev,
peer=parallel_state.get_pipeline_model_parallel_prev_rank(),
group=p2p_group,
)
ops.append(recv_prev_op)
if tensor_send_next is not None:
send_next_op = torch.distributed.P2POp(
op=torch.distributed.isend,
tensor=tensor_send_next,
peer=parallel_state.get_pipeline_model_parallel_next_rank(),
group=p2p_group,
)
ops.append(send_next_op)
if tensor_recv_next is not None:
recv_next_op = torch.distributed.P2POp(
op=torch.distributed.irecv,
tensor=tensor_recv_next,
peer=parallel_state.get_pipeline_model_parallel_next_rank(),
group=p2p_group,
)
ops.append(recv_next_op)
if len(ops) > 0:
# sync before communication if needed
if need_to_sync:
torch.cuda.synchronize()
reqs = torch.distributed.batch_isend_irecv(ops)
else:
# sync before communication if needed
if need_to_sync and any([
tensor_send_prev is not None, tensor_recv_prev is not None,
tensor_send_next is not None, tensor_recv_next is not None]):
torch.cuda.synchronize()
if tensor_send_prev is not None:
send_prev_req = torch.distributed.isend(
tensor=tensor_send_prev,
dst=parallel_state.get_pipeline_model_parallel_prev_rank(),
group=p2p_group,
)
reqs.append(send_prev_req)
if tensor_recv_prev is not None:
recv_prev_req = torch.distributed.irecv(
tensor=tensor_recv_prev,
src=parallel_state.get_pipeline_model_parallel_prev_rank(),
group=p2p_group,
)
reqs.append(recv_prev_req)
if tensor_send_next is not None:
send_next_req = torch.distributed.isend(
tensor=tensor_send_next,
dst=parallel_state.get_pipeline_model_parallel_next_rank(),
group=p2p_group,
)
reqs.append(send_next_req)
if tensor_recv_next is not None:
recv_next_op = torch.distributed.irecv(
tensor=tensor_recv_next,
src=parallel_state.get_pipeline_model_parallel_next_rank(),
group=p2p_group,
)
reqs.append(recv_next_op)
if len(reqs) > 0:
if overlap_p2p_comm:
return (None, None, None, None, reqs)
if async_comm:
if len(ops) == 0 or len(reqs) == len(ops):
tensor_send_prev_req = None if tensor_send_prev is None else reqs.pop(0)
tensor_recv_prev_req = None if tensor_recv_prev is None else reqs.pop(0)
tensor_send_next_req = None if tensor_send_next is None else reqs.pop(0)
tensor_recv_next_req = None if tensor_recv_next is None else reqs.pop(0)
elif len(reqs) == 1:
tensor_send_prev_req = None if tensor_send_prev is None else reqs[0]
tensor_recv_prev_req = None if tensor_recv_prev is None else reqs[0]
tensor_send_next_req = None if tensor_send_next is None else reqs[0]
tensor_recv_next_req = None if tensor_recv_next is None else reqs[0]
else:
assert False, "failed to manage p2p requests and handles"
return (tensor_send_prev_req, tensor_recv_prev_req, tensor_send_next_req, tensor_recv_next_req, None)
else:
for req in reqs:
req.wait()
return (None, None, None, None, None)
return (None, None, None, None, None)
# TODO(mkozuki): Check if it's possible to sunset `override_scatter_gather_tensors_in_pipeline`.
# TODO(mkozuki): Think about whether it's possible to push some logic and arguments, e.g.
# `scatter_gather_tensors_in_pipeline`, `sequence_parallel_enabled`, and
# `override_scatter_gather_tensors_in_pipeline`, to the user of
# apex.transformer forward_backward functions.
def _communicate(
tensor_send_next: Optional[torch.Tensor],
tensor_send_prev: Optional[torch.Tensor],
recv_prev: bool,
recv_next: bool,
tensor_shape: Optional[Shape] = None,
override_scatter_gather_tensors_in_pipeline: bool = False,
dtype_: Optional[torch.dtype] = None,
*,
scatter_gather_tensors_in_pipeline: bool = True,
params_dtype: Optional[torch.dtype] = None,
fp32_residual_connection: bool = False,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
overlap_p2p_comm: bool = False,
batch_p2p_comm: bool = True,
) -> Tuple[Union[torch.Tensor, FutureTensor, None], Union[torch.Tensor, FutureTensor, None]]:
"""Base function for communication of tensors between stages.
.. note::
Reference https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/blob/cfd2e2160700b7f2c1bf35298ac14bc341f4c759/megatron/p2p_communication.py#L24-L159
dtype logic: If none of ``dtype_``, ``params_dtype``, ``fp32_residual_connection`` is specified,
torch.float32 is used.
See https://github.com/NVIDIA/Megatron-LM/blob/d41696840ed0a7edb7e0499eb82a48ae112d9bb3/megatron/arguments.py#L145-L159
for the details of arguments of ``dtype_``, ``params_dtype``, ``fp32_residual_connection``.
Args:
tensor_send_next: tensor to send to next rank (no tensor sent if set to None).
tensor_send_prev: tensor to send to prev rank (no tensor sent if set to None).
recv_prev: boolean for whether tensor should be received from previous rank.
recv_next: boolean for whether tensor should be received from next rank.
tensor_shape: optional, use when the input sequence contains less tokens than the default sequence length
override_scatter_gather_tensors_in_pipeline:
optional, this is used when tensor_shape is provided to override scatter gather tensors
dtype_: This is used when tensor_shape is provided and what is the type of tensor_shape
Keyword args:
scatter_gather_tensors_in_pipeline: Optional. If :obj:`True`, use scatter/gather to optimize communication of tensors.
params_dtype: Optional and legacy. Defaults to torch.float. If you manually call `.half()` or `.bfloat16()` on
your model deliberately, pass this argument.
fp32_residual_connection: Optional. If :obj:`True`, move residual connections to fp32.
sequence_parallel_enabled: Set to :obj:`True` if sequence parallel is enabled.
This argument is here for consistency with Megatron-LM.
This argument has an effect on the communication optimization, not on tensor_shape update.
sync_batch_comm: If :obj:`False`, disable cuda synchronization after the batched communication.
To disable, https://github.com/pytorch/pytorch/pull/82450 would be required.
overlap_p2p_comm: If :obj:`True`, returns cuda wait handles to scheduler instead of completing
the communication within the p2p transfer API instance. The scheduler manages the communication completion
to overlap with computation.
batch_p2p_comm: If :obj:`True`, use the batched send and receive api to conduct the communication of
a collection of send and receive operations between peer. If :obj:`False`, conduct each send and recv operation
individually.
Returns:
tuple containing
- tensor_recv_prev: `torch.Tensor` if `recv_prev` is :obj:`True`, `None` otherwise.
- tensor_recv_next: `torch.Tensor` if `recv_next` is :obj:`True`, `None` otherwise.
"""
if async_comm and sequence_parallel_enabled:
import warnings # NOQA
class ExperimentalWarning(UserWarning): pass # NOQA
warnings.warn(
"The combination of `async_comm` and `sequence_parallel_enabled` is not well tested.",
ExperimentalWarning,
)
# Create placeholder tensors for receive in forward and backward directions if needed.
tensor_recv_prev = None
tensor_recv_next = None
if tensor_shape is None:
# In megatron, `tensor_shape` is set to `(args.seq_length, args.micro_batch_size, args.hidden_size)`
raise RuntimeError(
"`tensor_shape` must be specified. Common `tensor_shape` is `(seq_length, micro_batch_size, hidden_size)`")
tensor_parallel_size = parallel_state.get_tensor_model_parallel_world_size()
override_scatter_gather_tensors_in_pipeline_ = False
# TODO(mkozuki): Demystify hardcode False of `scatter_gather_tensors_in_pipeline` and add a testcase if possible.
# NOTE(mkozuki): This is super strange and doesn't make sense to me. I have no idea what is happening here.
# However, I can say that this hardcoding override is necessary for sequence parallel in nemo megatron to work.
# I've not managed to reproduce the hang using standalone GPT with sequence parallel.
    # The hang in NeMo Megatron happens in the 3rd iteration, the last iteration of the steady phase inside
# forward_backward_pipelining_without_interleaving, pipeline parallel rank of 0 (tensor model parallel world
# size of 2 and pipeline model parallel world size of 2). The commit then of APEX and NeMo were
# https://github.com/NVIDIA/apex/pull/1396/commits/3060c98dd8ba42abf7702ea9d2cff0f39ea74f45 and
# https://github.com/NVIDIA/NeMo/pull/4232/commits/1cb32dfca2ab9b20f53ebdb84476c34cb42f0205.
    # The PyTorch version was 1.13.0a0+git2d354cd, for what it's worth.
# Currently, indiscriminately this is set to `False`, which can lead to an unexpected performance regression
# for non sequence parallel case.
scatter_gather_tensors_in_pipeline = False
if scatter_gather_tensors_in_pipeline and not sequence_parallel_enabled:
tensor_chunk_size = int(reduce(operator.mul, tensor_shape, 1))
if tensor_chunk_size % tensor_parallel_size == 0:
tensor_chunk_shape = [tensor_chunk_size // tensor_parallel_size]
else:
tensor_chunk_shape = tensor_shape
override_scatter_gather_tensors_in_pipeline_ = True
else:
tensor_chunk_shape = tensor_shape
# The dtype logic below is copied from NVIDIA/Megatron-LM repo:
# https://github.com/NVIDIA/Megatron-LM/blob/d41696840ed0a7edb7e0499eb82a48ae112d9bb3/megatron/p2p_communication.py#L74-L81
dtype = params_dtype or torch.float
if fp32_residual_connection:
dtype = torch.float
requires_grad = True
if dtype_ is not None:
dtype = dtype_
# TODO(mkozuki): Figure out why this logic of requires_grad isn't working
# when sequence_parallel_enabled=True. Otherwise, `x.retain_grad()` of
# https://github.com/crcrpar/apex/blob/069832078a652b4bd8a99db84faf953a81415ab3/apex/transformer/pipeline_parallel/schedules/common.py#L360
# fails.
# requires_grad = False
if recv_prev:
tensor_recv_prev = torch.empty(
tensor_chunk_shape,
requires_grad=requires_grad,
device=torch.cuda.current_device(),
dtype=dtype,
)
if recv_next:
tensor_recv_next = torch.empty(
tensor_chunk_shape,
requires_grad=requires_grad,
device=torch.cuda.current_device(),
dtype=dtype,
)
# Split tensor into smaller chunks if using scatter-gather optimization.
scatter_gather_optimization_doable = (
not override_scatter_gather_tensors_in_pipeline_
and scatter_gather_tensors_in_pipeline
and not sequence_parallel_enabled
)
if scatter_gather_optimization_doable:
if tensor_send_next is not None:
tensor_send_next = split_tensor_into_1d_equal_chunks(tensor_send_next)
if tensor_send_prev is not None:
tensor_send_prev = split_tensor_into_1d_equal_chunks(tensor_send_prev)
# Send tensors in both the forward and backward directions as appropriate.
tensor_send_prev_req, tensor_recv_prev_req, tensor_send_next_req, tensor_recv_next_req, wait_handles = _run_p2pops(
tensor_send_prev, tensor_send_next, tensor_recv_prev, tensor_recv_next, async_comm, overlap_p2p_comm, batch_p2p_comm)
if async_comm:
tensor_recv_prev_waitfunc = None
tensor_recv_next_waitfunc = None
# TODO: investigate whether this is necessary for correctness (ref: https://github.com/pytorch/pytorch/issues/38642)
# see also: sync added for async_comm callbacks below in gather_recv_prev_wait and gather_recv_next_wait
if tensor_recv_prev_req is not None:
def tensor_recv_prev_wait():
tensor_recv_prev_req.wait()
torch.cuda.synchronize()
tensor_recv_prev_waitfunc = tensor_recv_prev_wait
if tensor_recv_next_req is not None:
def tensor_recv_next_wait():
tensor_recv_next_req.wait()
torch.cuda.synchronize()
tensor_recv_next_waitfunc = tensor_recv_next_wait
else:
if sync_batch_comm:
# To protect against race condition when using batch_isend_irecv().
torch.cuda.synchronize()
# If using scatter-gather optimization, gather smaller chunks.
if scatter_gather_optimization_doable:
if not async_comm:
if recv_prev:
tensor_recv_prev = (
gather_split_1d_tensor(tensor_recv_prev)
.view(tensor_shape)
.requires_grad_()
)
if recv_next:
tensor_recv_next = (
gather_split_1d_tensor(tensor_recv_next)
.view(tensor_shape)
.requires_grad_()
)
else:
def gather_recv_prev_wait():
tensor_recv_prev_req.wait()
# From @Deepak's PR https://github.com/NVIDIA/Megatron-LM/commit/27fc468964064eeb33b703c9a0b2af938d80dd14
# A sync seems to be needed before gather otherwise losses jump around e.g., in run_gpt_minimal_test
torch.cuda.synchronize()
return (
gather_split_1d_tensor(tensor_recv_prev)
.view(tensor_shape)
.requires_grad_()
)
def gather_recv_next_wait():
tensor_recv_next_req.wait()
torch.cuda.synchronize()
return (
gather_split_1d_tensor(tensor_recv_next)
.view(tensor_shape)
.requires_grad_()
)
tensor_recv_prev_waitfunc = gather_recv_prev_wait
tensor_recv_next_waitfunc = gather_recv_next_wait
if async_comm:
future_tensor_recv_prev = None
future_tensor_recv_next = None
if tensor_recv_prev is not None:
future_tensor_recv_prev = FutureTensor(tensor_recv_prev, tensor_recv_prev_waitfunc)
if tensor_recv_next is not None:
future_tensor_recv_next = FutureTensor(tensor_recv_next, tensor_recv_next_waitfunc)
return future_tensor_recv_prev, future_tensor_recv_next, None
return tensor_recv_prev, tensor_recv_next, wait_handles
def recv_forward(
tensor_shape: Shape,
override_scatter_gather_tensors_in_pipeline: bool = False,
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
batch_p2p_comm: bool = True,
timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor, None]:
"""Receive tensor from previous rank in pipeline (forward receive)."""
if parallel_state.is_pipeline_first_stage():
return None
# if timers is not None:
# timers("forward-recv").start()
input_tensor, _, _ = _communicate(
tensor_send_next=None,
tensor_send_prev=None,
recv_prev=True,
recv_next=False,
tensor_shape=tensor_shape,
override_scatter_gather_tensors_in_pipeline=override_scatter_gather_tensors_in_pipeline,
dtype_=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
# if timers is not None:
# timers("forward-recv").stop()
return input_tensor
def recv_backward(
tensor_shape: Shape = None,
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
batch_p2p_comm: bool = True,
timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor, None]:
"""Receive tensor from next rank in pipeline (backward receive)."""
if parallel_state.is_pipeline_last_stage():
return None
# if timers is not None:
# timers("backward-recv").start()
_, output_tensor_grad, _ = _communicate(
tensor_send_next=None,
tensor_send_prev=None,
recv_prev=False,
recv_next=True,
tensor_shape=tensor_shape,
dtype_=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
# if timers is not None:
# timers("backward-recv").stop()
return output_tensor_grad
def send_forward(
output_tensor: torch.Tensor,
override_scatter_gather_tensors_in_pipeline: bool = False,
tensor_shape: Shape = None,
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
batch_p2p_comm: bool = True,
timers: _Timers = None,
) -> None:
"""Send tensor to next rank in pipeline (forward send)."""
if parallel_state.is_pipeline_last_stage():
return
# if timers is not None:
# timers("forward-send").start()
_communicate(
tensor_send_next=output_tensor,
tensor_send_prev=None,
recv_prev=False,
recv_next=False,
override_scatter_gather_tensors_in_pipeline=override_scatter_gather_tensors_in_pipeline,
tensor_shape=tensor_shape,
dtype_=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
# if timers is not None:
# timers("forward-send").stop()
def send_backward(
input_tensor_grad: torch.Tensor,
tensor_shape: Shape,
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
batch_p2p_comm: bool = True,
timers: _Timers = None,
) -> None:
"""Send tensor to previous rank in pipeline (backward send)."""
if parallel_state.is_pipeline_first_stage():
return
# if timers is not None:
# timers("backward-send").start()
_communicate(
tensor_send_next=None,
tensor_send_prev=input_tensor_grad,
recv_prev=False,
recv_next=False,
tensor_shape=tensor_shape,
dtype_=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
# if timers is not None:
# timers("backward-send").stop()
def send_forward_recv_backward(
output_tensor: torch.Tensor,
tensor_shape: Shape,
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
batch_p2p_comm: bool = True,
timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor, None]:
"""Batched send and recv with next rank in pipeline."""
if parallel_state.is_pipeline_last_stage():
return None
# if timers is not None:
# timers("forward-send-backward-recv").start()
_, output_tensor_grad, _ = _communicate(
tensor_send_next=output_tensor,
tensor_send_prev=None,
recv_prev=False,
recv_next=True,
tensor_shape=tensor_shape,
dtype_=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
# if timers is not None:
# timers("forward-send-backward-recv").stop()
return output_tensor_grad
def send_backward_recv_forward(
input_tensor_grad: torch.Tensor,
tensor_shape: Shape,
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
batch_p2p_comm: bool = True,
timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor, None]:
"""Batched send and recv with previous rank in pipeline."""
if parallel_state.is_pipeline_first_stage():
return None
# if timers is not None:
# timers("backward-send-forward-recv").start()
input_tensor, _, _ = _communicate(
tensor_send_next=None,
tensor_send_prev=input_tensor_grad,
recv_prev=True,
recv_next=False,
tensor_shape=tensor_shape,
dtype_=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
# if timers is not None:
# timers("backward-send-forward-recv").stop()
return input_tensor
def send_forward_recv_forward(
output_tensor: torch.Tensor,
recv_prev: bool,
tensor_shape: Shape,
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
overlap_p2p_comm: bool = False,
batch_p2p_comm: bool = True,
timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor]:
"""Batched recv from previous rank and send to next rank in pipeline."""
# if timers is not None:
# timers("forward-send-forward-recv").start()
input_tensor, _, wait_handles = _communicate(
tensor_send_next=output_tensor,
tensor_send_prev=None,
recv_prev=recv_prev,
recv_next=False,
tensor_shape=tensor_shape,
dtype_=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=overlap_p2p_comm,
batch_p2p_comm=batch_p2p_comm,
)
# if timers is not None:
# timers("forward-send-forward-recv").stop()
if overlap_p2p_comm:
return input_tensor, wait_handles
return input_tensor
def send_backward_recv_backward(
input_tensor_grad: torch.Tensor,
recv_next: bool,
tensor_shape: Shape,
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
overlap_p2p_comm: bool = False,
batch_p2p_comm: bool = True,
timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor]:
"""Batched recv from next rank and send to previous rank in pipeline."""
# if timers is not None:
# timers("backward-send-backward-recv").start()
_, output_tensor_grad, wait_handles = _communicate(
tensor_send_next=None,
tensor_send_prev=input_tensor_grad,
recv_prev=False,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype_=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=overlap_p2p_comm,
batch_p2p_comm=batch_p2p_comm,
)
# if timers is not None:
# timers("backward-send-backward-recv").stop()
if overlap_p2p_comm:
return output_tensor_grad, wait_handles
return output_tensor_grad
def send_forward_backward_recv_forward_backward(
output_tensor: torch.Tensor,
input_tensor_grad: torch.Tensor,
recv_prev: bool,
recv_next: bool,
tensor_shape: Shape,
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
overlap_p2p_comm: bool = False,
batch_p2p_comm: bool = True,
timers: _Timers = None,
) -> Tuple[Union[torch.Tensor, FutureTensor], Union[torch.Tensor, FutureTensor]]:
"""Batched send and recv with previous and next ranks in pipeline."""
# if timers is not None:
# timers("forward-backward-send-forward-backward-recv").start()
input_tensor, output_tensor_grad, wait_handles = _communicate(
tensor_send_next=output_tensor,
tensor_send_prev=input_tensor_grad,
recv_prev=recv_prev,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype_=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=overlap_p2p_comm,
batch_p2p_comm=batch_p2p_comm,
)
# if timers is not None:
# timers("forward-backward-send-forward-backward-recv").stop()
if overlap_p2p_comm:
return input_tensor, output_tensor_grad, wait_handles
return input_tensor, output_tensor_grad
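# Illustrative sketch (added for documentation; not part of the original file):
# the basic forward-only relay pattern these helpers support. It assumes
# pipeline model parallelism has been initialized via
# `parallel_state.initialize_model_parallel`, that every stage uses the same
# `tensor_shape`, and that `model` is any callable mapping the received
# activation to this stage's output.
def _forward_relay_example(model, tensor_shape, input_on_first_stage, dtype=torch.float32):
    if parallel_state.is_pipeline_first_stage():
        # The first stage feeds the data in.
        input_tensor = input_on_first_stage
    else:
        # Later stages receive the activation from the previous stage.
        input_tensor = recv_forward(tensor_shape=tensor_shape, dtype=dtype)
    output_tensor = model(input_tensor)
    # Non-last stages pass the activation on; on the last stage this is a no-op.
    send_forward(output_tensor, tensor_shape=tensor_shape, dtype=dtype)
    return output_tensor if parallel_state.is_pipeline_last_stage() else None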
| GeneSplice-main | GeneSplice/apex/apex/transformer/pipeline_parallel/p2p_communication.py |
import contextlib
from typing import Any, List, Optional, Sequence, Union
import warnings
import torch
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.pipeline_parallel.p2p_communication import FutureTensor
from apex.transformer.pipeline_parallel.utils import get_kth_microbatch
from apex.transformer.pipeline_parallel.utils import listify_model
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import get_model_type
from apex.transformer.pipeline_parallel.schedules.common import Batch
from apex.transformer.pipeline_parallel.schedules.common import FwdStepFunc
from apex.transformer.pipeline_parallel.schedules.common import backward_step
from apex.transformer.pipeline_parallel.schedules.common import forward_step
from apex.transformer.pipeline_parallel.schedules.common import free_output_tensor
from apex.transformer.log_util import get_transformer_logger
__all__ = ["forward_backward_pipelining_without_interleaving"]
_logger = get_transformer_logger(__name__)
def get_tensor_shapes(
rank: int,
model_type: ModelType,
*,
tensor_shape: Union[List[int], torch.Size],
decoder_sequence_length: Optional[int] = None,
sequence_parallel_enabled: bool = False,
) -> Sequence[Sequence[int]]:
"""Get tensors shapes
Args:
rank: pipeline parallel rank
model_type:
Keyword Args:
tensor_shape:
decoder_sequence_length:
sequence_parallel_enabled:
"""
# Determine right tensor sizes (based on position of rank with respect to split
# rank) and model size.
# Send two tensors if model is T5 and rank is in decoder stage:
# first tensor is decoder (pre-transpose),
# second tensor is encoder (post-transpose).
# If model is T5 and rank is at the boundary:
# send one tensor (post-transpose from encoder).
# Otherwise, send one tensor (pre-transpose).
assert (
len(tensor_shape) == 3
), f"`tensor_shape` should be [sequence_length, micro_batch_size, hidden_size] but {tensor_shape}"
sequence_length, micro_batch_size, hidden_size = tensor_shape
tensor_shapes = []
if sequence_parallel_enabled:
seq_length = sequence_length // parallel_state.get_tensor_model_parallel_world_size()
else:
seq_length = sequence_length
if model_type == ModelType.encoder_and_decoder:
if sequence_parallel_enabled:
dec_seq_length = decoder_sequence_length // parallel_state.get_tensor_model_parallel_world_size()
else:
dec_seq_length = decoder_sequence_length
if parallel_state.is_pipeline_stage_before_split(rank):
tensor_shapes.append((seq_length, micro_batch_size, hidden_size))
else:
tensor_shapes.append((dec_seq_length, micro_batch_size, hidden_size))
tensor_shapes.append((seq_length, micro_batch_size, hidden_size))
else:
tensor_shapes.append((seq_length, micro_batch_size, hidden_size))
return tensor_shapes
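# Illustrative sketch (added for documentation; not part of the original file):
# what `get_tensor_shapes` returns for a plain GPT-style (encoder-or-decoder)
# model. The sizes are made-up example values, and with
# `sequence_parallel_enabled=False` this particular call is a pure shape
# computation; with it enabled, the sequence dimension would additionally be
# divided by the tensor model parallel world size.
def _get_tensor_shapes_example(rank: int) -> Sequence[Sequence[int]]:
    shapes = get_tensor_shapes(
        rank,
        ModelType.encoder_or_decoder,
        tensor_shape=(2048, 4, 1024),  # (sequence_length, micro_batch_size, hidden_size)
        sequence_parallel_enabled=False,
    )
    # -> [(2048, 4, 1024)]: a single activation/grad shape per communication.
    return shapes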
def recv_forward(
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
input_tensors = []
for tensor_shape in tensor_shapes:
if tensor_shape is None:
input_tensors.append(None)
else:
input_tensors.append(
p2p_communication.recv_forward(
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
)
return input_tensors
def recv_backward(
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
output_tensor_grads = []
for tensor_shape in tensor_shapes:
if tensor_shape is None:
output_tensor_grads.append(None)
else:
output_tensor_grads.append(
p2p_communication.recv_backward(
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
)
return output_tensor_grads
def send_forward(
output_tensors: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
) -> None:
if not isinstance(output_tensors, list):
output_tensors = [output_tensors]
for (output_tensor, tensor_shape) in zip(output_tensors, tensor_shapes):
if tensor_shape is None:
continue
p2p_communication.send_forward(
output_tensor,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
def send_backward(
input_tensor_grads: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
) -> None:
if not isinstance(input_tensor_grads, list):
input_tensor_grads = [input_tensor_grads]
for (input_tensor_grad, tensor_shape) in zip(input_tensor_grads, tensor_shapes):
if tensor_shape is None:
continue
p2p_communication.send_backward(
input_tensor_grad,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
def send_forward_recv_backward(
output_tensors: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
if not isinstance(output_tensors, list):
output_tensors = [output_tensors]
output_tensor_grads = []
for (output_tensor, tensor_shape) in zip(output_tensors, tensor_shapes):
if tensor_shape is None:
output_tensor_grads.append(None)
continue
output_tensor_grad = p2p_communication.send_forward_recv_backward(
output_tensor,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
output_tensor_grads.append(output_tensor_grad)
return output_tensor_grads
def send_backward_recv_forward(
input_tensor_grads: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
sync_batch_comm: bool = True,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
if not isinstance(input_tensor_grads, list):
input_tensor_grads = [input_tensor_grads]
input_tensors = []
for (input_tensor_grad, tensor_shape) in zip(input_tensor_grads, tensor_shapes):
if tensor_shape is None:
input_tensors.append(None)
continue
input_tensor = p2p_communication.send_backward_recv_forward(
input_tensor_grad,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
input_tensors.append(input_tensor)
return input_tensors
def forward_backward_pipelining_without_interleaving(
forward_step_func: FwdStepFunc,
batch: Optional[Batch],
model: Union[torch.nn.Module, List[torch.nn.Module]],
*,
forward_only: bool,
tensor_shape: Optional[Union[List[int], torch.Size]] = None,
decoder_sequence_length: Optional[int] = None,
dtype: Optional[torch.dtype] = None,
grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
disable_autocast: bool = False,
deallocate_pipeline_outputs: bool = False,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
custom_sync_context_handler: Optional[Any] = None,
custom_grad_sync_func: Optional[Any] = None,
sync_batch_comm: bool = True,
num_micro_batches_with_partial_activation_checkpoints: Optional[int] = None,
**kwargs,
) -> List[Union[torch.Tensor, Sequence[torch.Tensor]]]:
"""Run non-interleaved 1F1B schedule, with communication between pipeline stages.
This pipeline parallel scheduling consists of three steps:
1. warmup
2. 1F1B a.k.a. steady state
3. cooldown if not forward_only
Args:
forward_step_func: A function which takes a minibatch and model as its arguments and
returns model's forward output and the loss function.
The loss function is supposed to take one `torch.Tensor` and
return a `torch.Tensor` of loss and a dictionary of `str` and `torch.Tensor`.
batch: A minibatch, i.e., a list of `torch.Tensor`'s.
model: A `torch.nn.Module` or a list of `torch.nn.Module`.
Keyword args:
forward_only:
tensor_shape: Shape of tensor. The tensor is expected to be 3D and its order of dimension
is supposed to be ``(sequence, batch, hidden)``.
dtype: dtype used in p2p communication. If ``None`` (default value),
torch.float32 will be used even if ``autocast`` is enabled.
grad_scaler:
disable_autocast:
deallocate_pipeline_outputs: If :obj:`True`, free the data of the output tensor of
each pipeline stage. Experimental.
sequence_parallel_enabled: Set to :obj:`True` for this function to handle sequence length.
When :obj:`True`, the sequence length on each tensor model parallel rank is updated
to :math:`original\_sequence\_length / tensor\_model\_parallel\_world\_size`.
        custom_sync_context_handler: Does nothing if ``None`` (default
            value). Otherwise, a function that constructs a context
            manager that disables asynchronous gradient reductions.
            Asynchronous gradient reductions are only enabled in the
            first pipeline stage, during the last backward pass.
custom_grad_sync_func: Does nothing if ``None`` (default
value). Otherwise, a function to perform gradient
reductions. This is called in all pipeline stages except
the first, during the bubble overhead.
        sync_batch_comm: If :obj:`False`, skip the CUDA synchronization after the batched communication.
            Disabling this requires the fix in https://github.com/pytorch/pytorch/pull/82450.
        num_micro_batches_with_partial_activation_checkpoints: If an :obj:`int`, the number of
            micro-batches that checkpoint the activations of only a subset of the Transformer layers.
            The remaining micro-batches within the window of maximum outstanding micro-batch
            backpropagations checkpoint all Transformer layers.
Returns:
a list of loss `torch.Tensor`s if the last stage, empty list otherwise.
"""
# timers = get_timers()
if deallocate_pipeline_outputs:
warnings.warn(
"`deallocate_pipeline_outputs` is experimental and subject to change. "
"This option is not recommended."
)
model: List[torch.nn.Module] = listify_model(model)
if len(model) != 1:
msg = f"`model` is expected be a `nn.Module`, but {type(model)}"
raise RuntimeError(msg)
model: torch.nn.Module = model[0]
# Disable async grad reductions
if custom_sync_context_handler is not None:
sync_context_handler = custom_sync_context_handler
else:
sync_context_handler = contextlib.nullcontext
sync_context = None
def disable_grad_sync():
"""Disable asynchronous grad reductions"""
nonlocal sync_context
if sync_context is None:
sync_context = sync_context_handler()
sync_context.__enter__()
def enable_grad_sync():
"""Enable asynchronous grad reductions"""
nonlocal sync_context
if sync_context is not None:
sync_context.__exit__(None, None, None)
sync_context = None
disable_grad_sync()
# Compute number of warmup microbatches.
num_microbatches: int = get_num_microbatches()
num_warmup_microbatches: int = (
parallel_state.get_pipeline_model_parallel_world_size() - parallel_state.get_pipeline_model_parallel_rank() - 1
)
num_warmup_microbatches: int = min(num_warmup_microbatches, num_microbatches)
num_microbatches_remaining: int = num_microbatches - num_warmup_microbatches
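    # Illustrative example: with a pipeline-parallel world size of 4 and 8 microbatches,
    # rank 0 runs 3 warmup microbatches and 5 in the steady state, while the last rank (3)
    # runs 0 warmup microbatches and all 8 in the steady state.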
    # Checkpoint the activations of only a subset of Transformer layers in a number of micro-batches
    # within the window of maximum outstanding micro-batch backpropagations.
    # Micro-batches with ids less than 'num_micro_batches_with_partial_activation_checkpoints'
    # checkpoint only a subset of Transformer layers (or skip checkpointing), while
    # the remaining micro-batches within the window checkpoint all Transformer layers.
    # The window of micro-batches is set by the maximum number of outstanding
    # backpropagations and becomes smaller at later pipeline stages.
    # See Appendix C of https://arxiv.org/pdf/2205.05198.pdf.
max_outstanding_backprops = None
if num_micro_batches_with_partial_activation_checkpoints is not None:
max_outstanding_backprops = num_warmup_microbatches + 1
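    # Illustrative example: with num_warmup_microbatches=3 (so max_outstanding_backprops=4) and
    # num_micro_batches_with_partial_activation_checkpoints=2, micro-batches with i % 4 in {0, 1}
    # use partial checkpointing while those with i % 4 in {2, 3} checkpoint all Transformer layers.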
model_type = get_model_type(model)
rank: int = parallel_state.get_pipeline_model_parallel_rank()
recv_tensor_shapes: List[List[int]] = get_tensor_shapes(
rank - 1,
model_type,
tensor_shape=tensor_shape,
decoder_sequence_length=decoder_sequence_length,
sequence_parallel_enabled=sequence_parallel_enabled,
)
send_tensor_shapes: List[List[int]] = get_tensor_shapes(
rank,
model_type,
tensor_shape=tensor_shape,
decoder_sequence_length=decoder_sequence_length,
sequence_parallel_enabled=sequence_parallel_enabled,
)
_logger.info(
f"num_microbatches: {num_microbatches}, "
f"num_warmup_microbatches: {num_warmup_microbatches}, "
f"num_microbatches_remaining: {num_microbatches_remaining}"
)
# Input, output tensors only need to be saved when doing backward passes
input_tensors: List[Union[None, torch.Tensor]] = []
output_tensors: List[Union[None, torch.Tensor]] = []
losses_reduced: List[Union[None, torch.Tensor]] = []
###################################################################################################################
# Run warmup forward passes.
###################################################################################################################
_logger.info("Warmup")
for i in range(num_warmup_microbatches):
_logger.debug(f"warmup iter: {i} / {num_warmup_microbatches}")
_logger.debug("receive fwd")
# Decide to checkpoint all layers' activations of the current micro-batch
if max_outstanding_backprops is not None:
checkpoint_activations_micro_batch = (
i % max_outstanding_backprops >= num_micro_batches_with_partial_activation_checkpoints
)
else:
checkpoint_activations_micro_batch = None
input_tensor = recv_forward(
tensor_shapes=recv_tensor_shapes,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
cur_microbatch: Optional[torch.Tensor] = get_kth_microbatch(batch, i)
output_tensor = forward_step(
forward_step_func,
cur_microbatch,
model,
input_tensor,
losses_reduced,
dtype,
disable_autocast,
checkpoint_activations_micro_batch,
)
_logger.debug("send fwd")
send_forward(
output_tensor,
tensor_shapes=send_tensor_shapes,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
if not forward_only:
input_tensors.append(input_tensor)
output_tensors.append(output_tensor)
free_output_tensor(output_tensor, deallocate_pipeline_outputs)
# Before running 1F1B, need to receive first forward tensor.
# If all microbatches are run in warmup / cooldown phase, then no need to
# receive this tensor here.
if num_microbatches_remaining > 0:
_logger.debug("recv_forward before steady state start")
        input_tensor: List[Union[None, torch.Tensor, FutureTensor]] = recv_forward(
            tensor_shapes=recv_tensor_shapes,
            dtype=dtype,
            async_comm=async_comm,
            sequence_parallel_enabled=sequence_parallel_enabled,
            sync_batch_comm=sync_batch_comm,
        )
###################################################################################################################
# Run 1F1B in steady state.
###################################################################################################################
_logger.info("Steady phase")
for i in range(num_microbatches_remaining):
_logger.debug(f"steady iter: {i} / {num_microbatches_remaining}")
last_iteration: bool = i == (num_microbatches_remaining - 1)
# Decide to checkpoint all layers' activations of the current micro-batch
if max_outstanding_backprops is not None:
checkpoint_activations_micro_batch = (
((i+num_warmup_microbatches) % max_outstanding_backprops) >= num_micro_batches_with_partial_activation_checkpoints
)
else:
checkpoint_activations_micro_batch = None
cur_microbatch: Optional[torch.Tensor] = get_kth_microbatch(batch, i + num_warmup_microbatches)
output_tensor: Union[torch.Tensor, Sequence[torch.Tensor]] = forward_step(
forward_step_func,
cur_microbatch,
model,
input_tensor,
losses_reduced,
dtype,
disable_autocast,
checkpoint_activations_micro_batch,
)
if forward_only:
_logger.debug("send fwd")
send_forward(
output_tensor,
tensor_shapes=send_tensor_shapes,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
if not last_iteration:
_logger.debug("receive fwd (last iteration)")
input_tensor = recv_forward(
tensor_shapes=recv_tensor_shapes,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
else:
_logger.debug("send fwd & receive bwd")
output_tensor_grad = send_forward_recv_backward(
output_tensor,
tensor_shapes=send_tensor_shapes,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
# Add input_tensor and output_tensor to end of list.
input_tensors.append(input_tensor)
output_tensors.append(output_tensor)
free_output_tensor(output_tensor, deallocate_pipeline_outputs)
# Pop input_tensor and output_tensor from the start of the list for the backward pass.
input_tensor = input_tensors.pop(0)
output_tensor = output_tensors.pop(0)
input_tensor_grad = backward_step(
input_tensor,
output_tensor,
output_tensor_grad,
model_type=model_type,
grad_scaler=grad_scaler,
deallocate_pipeline_outputs=deallocate_pipeline_outputs,
)
if last_iteration:
input_tensor = None
_logger.debug("send bwd")
send_backward(
input_tensor_grad,
tensor_shapes=recv_tensor_shapes,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
else:
_logger.debug("send bwd and receive fwd")
input_tensor = send_backward_recv_forward(
input_tensor_grad,
tensor_shapes=recv_tensor_shapes,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
###################################################################################################################
# Run cooldown backward passes.
###################################################################################################################
_logger.info("Cooldown phase")
if not forward_only:
for i in range(num_warmup_microbatches):
_logger.debug(f"cooldown iter: {i} / {num_warmup_microbatches}")
if i == num_warmup_microbatches-1 and rank == 0:
# Async grad reduction in first pipeline stage, during
# last backward pass
enable_grad_sync()
input_tensor = input_tensors.pop(0)
output_tensor = output_tensors.pop(0)
_logger.debug("receive bwd")
output_tensor_grad = recv_backward(
tensor_shapes=send_tensor_shapes,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
input_tensor_grad = backward_step(
input_tensor,
output_tensor,
output_tensor_grad,
model_type=model_type,
grad_scaler=grad_scaler,
deallocate_pipeline_outputs=deallocate_pipeline_outputs,
)
_logger.debug("send bwd")
send_backward(
input_tensor_grad,
tensor_shapes=recv_tensor_shapes,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
)
# Grad reduction in all pipeline stages except the first, during
# the bubble overhead
enable_grad_sync()
if rank != 0 and custom_grad_sync_func is not None:
custom_grad_sync_func()
return losses_reduced
| GeneSplice-main | GeneSplice/apex/apex/transformer/pipeline_parallel/schedules/fwd_bwd_pipelining_without_interleaving.py |
import contextlib
from typing import List, Union, Optional
import torch
from apex.transformer.pipeline_parallel.utils import listify_model
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import get_kth_microbatch
from apex.transformer.pipeline_parallel.utils import get_model_type
from apex.transformer.pipeline_parallel.schedules.common import Batch
from apex.transformer.pipeline_parallel.schedules.common import FwdStepFunc
from apex.transformer.pipeline_parallel.schedules.common import forward_step
from apex.transformer.pipeline_parallel.schedules.common import backward_step
from apex.transformer.log_util import get_transformer_logger
_all__ = ["forward_backward_no_pipelining"]
_logger = get_transformer_logger(__name__)
def forward_backward_no_pipelining(
forward_step_func: FwdStepFunc,
batch: Batch,
model: Union[torch.nn.Module, List[torch.nn.Module]],
*,
forward_only: bool,
dtype: Optional[torch.dtype] = None,
grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
disable_autocast: bool = False,
custom_sync_context_handler=None,
**kwargs,
):
"""Run forward and backward passes with no pipeline parallelism (no inter-stage communication).
This pipeline parallel scheduling handles the last microbatch differently to synchronize gradients.
Args:
forward_step_func: A function which takes a minibatch and model as its arguments and
returns model's forward output and the loss function.
The loss function is supposed to take one `torch.Tensor` and
return a `torch.Tensor` of loss and a dictionary of `str` and `torch.Tensor`.
batch: A List of torch.Tensors
model: A `torch.nn.Module` or a list of `torch.nn.Module`.
Keyword args:
forward_only:
grad_scaler:
dtype:
        disable_autocast: Turn off the `enabled` flag of `torch.cuda.amp.autocast` if :obj:`True`.
            Use this when your forward and loss computation already runs inside an autocast context,
            to avoid unnecessarily nesting autocast contexts.
        custom_sync_context_handler: A function that constructs a context manager used to
            disable asynchronous gradient reductions.
        **kwargs: Added to accept `tensor_shape`, which has no effect on this function.
Returns:
a list of dictionaries of loss `torch.Tensor`s if the last stage, empty list otherwise.
"""
model = listify_model(model)
if len(model) != 1:
msg = f"`model` is expected be a `nn.Module`, but {type(model)}"
raise RuntimeError(msg)
model = model[0]
model_type = get_model_type(model)
if custom_sync_context_handler is not None:
context_handler = custom_sync_context_handler
elif isinstance(model, torch.nn.parallel.distributed.DistributedDataParallel):
context_handler = model.no_sync
else:
context_handler = contextlib.nullcontext
losses_reduced = []
input_tensor, output_tensor_grad = None, None
num_micro_batches = get_num_microbatches()
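    # Note: with e.g. 8 microbatches, microbatches 0..6 run inside the context handler
    # (DDP's ``no_sync`` when the model is DDP-wrapped) and microbatch 7 runs outside it,
    # so the gradient all-reduce fires exactly once per step.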
with context_handler():
for i in range(num_micro_batches - 1):
_logger.info(f"Iter {i} of {num_micro_batches - 1}")
cur_micro_batch = get_kth_microbatch(batch, i)
_logger.debug("Call `forward_step`")
output_tensor = forward_step(
forward_step_func,
cur_micro_batch,
model,
input_tensor,
losses_reduced,
dtype=dtype,
disable_autocast=disable_autocast,
)
if not forward_only:
_logger.debug("Call `backward_step`")
backward_step(
input_tensor,
output_tensor,
output_tensor_grad,
model_type=model_type,
grad_scaler=grad_scaler,
)
# Run computation for last microbatch out of context handler (want to
# synchronize gradients).
_logger.info("Cooldown")
_logger.debug("Call `forward_step`")
output_tensor = forward_step(
forward_step_func,
get_kth_microbatch(batch, num_micro_batches - 1),
model,
input_tensor,
losses_reduced,
dtype=dtype,
disable_autocast=disable_autocast,
)
if not forward_only:
_logger.debug("Call `backward_step`")
backward_step(
input_tensor,
output_tensor,
output_tensor_grad,
model_type=model_type,
grad_scaler=grad_scaler,
)
return losses_reduced
| GeneSplice-main | GeneSplice/apex/apex/transformer/pipeline_parallel/schedules/fwd_bwd_no_pipelining.py |
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import (
forward_backward_no_pipelining,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
_forward_backward_pipelining_with_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
__all__ = [
"get_forward_backward_func",
]
class ExperimentalWarning(Warning):
pass
def get_forward_backward_func(
virtual_pipeline_model_parallel_size, pipeline_model_parallel_size,
):
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
if virtual_pipeline_model_parallel_size is not None:
if get_num_microbatches() % pipeline_model_parallel_size != 0:
msg = "number of microbatches is not divisible by pipeline-parallel size when using interleaved schedule"
raise RuntimeError(msg)
forward_backward_func = _forward_backward_pipelining_with_interleaving
else:
forward_backward_func = forward_backward_pipelining_without_interleaving
else:
forward_backward_func = forward_backward_no_pipelining
return forward_backward_func
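# Illustrative usage (a sketch; assumes ``parallel_state`` has already been initialized and
# ``forward_step_fn``, ``batch``, ``model``, and ``shape`` are supplied by the caller):
#   fwd_bwd_func = get_forward_backward_func(
#       virtual_pipeline_model_parallel_size=None,
#       pipeline_model_parallel_size=parallel_state.get_pipeline_model_parallel_world_size(),
#   )
#   losses = fwd_bwd_func(forward_step_fn, batch, model, forward_only=False, tensor_shape=shape)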
| GeneSplice-main | GeneSplice/apex/apex/transformer/pipeline_parallel/schedules/__init__.py |
from typing import Any, Callable, Dict, List, Tuple, Union, Optional, Sequence
import torch
from torch.autograd.variable import Variable
from apex.normalization.fused_layer_norm import FusedLayerNorm
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.pipeline_parallel.p2p_communication import FutureTensor
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import listify_model
from apex.transformer.pipeline_parallel.utils import unwrap_model
from apex.transformer.pipeline_parallel.utils import get_model_type
from apex.transformer.tensor_parallel.layers import (
set_defaults_if_not_set_tensor_model_parallel_attributes,
)
from apex.transformer.log_util import get_transformer_logger
_logger = get_transformer_logger(__name__)
Batch = Union[torch.Tensor, FutureTensor, List[Union[torch.Tensor, FutureTensor]], Tuple[Union[torch.Tensor, FutureTensor], ...]]
LossFunc = Callable[[torch.Tensor], torch.Tensor]
FwdStepFunc = Callable[
[Optional[Batch], torch.nn.Module], Tuple[torch.Tensor, LossFunc]
]
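# Illustrative shape of a ``FwdStepFunc`` (a minimal sketch; the real step function is
# supplied by the caller and the ``batch`` handling depends on the data pipeline):
#
#   def forward_step_fn(batch, model):
#       tokens = batch[0] if isinstance(batch, (list, tuple)) else batch
#       output_tensor = model(tokens)
#
#       def loss_func(output_tensor):
#           loss = output_tensor.float().mean()
#           return loss, {"avg loss": loss.detach()}
#
#       return output_tensor, loss_func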
def build_model(
model_provider_func: Callable[[Any, Dict[str, Any]], torch.nn.Module],
wrap_with_ddp: bool = True,
virtual_pipeline_model_parallel_size: Optional[int] = None,
model_type: ModelType = ModelType.encoder_or_decoder,
*args: Any,
**kwargs: Any,
) -> List[torch.nn.Module]:
"""Build the model satisfying pipeline model parallel requirements.
This function sets `pre_process` and `post_process` to `**kwargs` and pass `*args` and `**kwargs` to
`model_provider_func`.
Args:
model_provider_func: A function which takes `*args` and `**kwargs` and returns a `nn.Module`.
wrap_with_ddp: If :obj:`True`, wrap the instantiated model
with `torch.nn.parallel.distributed.DistributedDataParallel`, a.k.a. `DDP`.
virtual_pipeline_model_parallel_size: Specify when using interleaving scheduling pipeline model parallel.
model_type:
*args: arguments for model provider func
**kwargs: Keyword arguments for model provider func
Returns:
a list of `nn.Module`(s). If `virtual_pipeline_model_parallel_size` is not None,
the list has multiple models, otherwise one.
"""
if (
parallel_state.get_pipeline_model_parallel_world_size() > 1
and virtual_pipeline_model_parallel_size is not None
):
model = []
for i in range(virtual_pipeline_model_parallel_size):
cur_args = args
cur_kwargs = kwargs
parallel_state.set_virtual_pipeline_model_parallel_rank(i)
# Set pre_process and post_process only after virtual rank is set.
pre_process = parallel_state.is_pipeline_first_stage()
post_process = parallel_state.is_pipeline_last_stage()
cur_kwargs.update(
{"pre_process": pre_process, "post_process": post_process,}
)
this_model = model_provider_func(*cur_args, **cur_kwargs)
model.append(this_model)
else:
cur_args = args
cur_kwargs = kwargs
if model_type == ModelType.encoder_or_decoder:
pre_process = parallel_state.is_pipeline_first_stage()
post_process = parallel_state.is_pipeline_last_stage()
cur_kwargs.update(
{"pre_process": pre_process, "post_process": post_process,}
)
model = model_provider_func(*cur_args, **cur_kwargs)
elif model_type == ModelType.encoder_and_decoder:
pre_process = parallel_state.is_pipeline_first_stage()
post_process = parallel_state.is_pipeline_last_stage()
# `add_encoder` & `add_decoder` logic.
add_encoder, add_decoder = True, True
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
split_rank = parallel_state.get_pipeline_model_parallel_split_rank()
if split_rank is None:
raise RuntimeError(
"Split rank needs to be specified for model with both encoder and decoder."
)
rank = parallel_state.get_pipeline_model_parallel_rank()
world_size = parallel_state.get_pipeline_model_parallel_world_size()
pre_process = rank == 0 or rank == split_rank
post_process = rank == (split_rank - 1) or rank == (world_size - 1)
add_encoder = parallel_state.is_pipeline_stage_before_split()
add_decoder = parallel_state.is_pipeline_stage_after_split()
cur_kwargs.update(
{
"pre_process": pre_process,
"post_process": post_process,
"add_encoder": add_encoder,
"add_decoder": add_decoder,
}
)
model = model_provider_func(*cur_args, **cur_kwargs)
model.model_type = model_type
if not isinstance(model, list):
model = [model]
# Set tensor model parallel attributes if not set.
# Only parameters that are already tensor model parallel have these
# attributes set for them. We should make sure the default attributes
# are set for all params so the optimizer can use them.
for model_module in model:
for param in model_module.parameters():
set_defaults_if_not_set_tensor_model_parallel_attributes(param)
# Print number of parameters.
if (
parallel_state.model_parallel_is_initialized()
and parallel_state.get_data_parallel_rank() == 0
):
msg = " > number of parameters on (tensor, pipeline) model parallel rank ({}, {}): {}".format(
parallel_state.get_tensor_model_parallel_rank(),
parallel_state.get_pipeline_model_parallel_rank(),
_calc_number_of_params(model),
)
print(msg, flush=True)
# GPU allocation.
for model_module in model:
model_module.cuda(torch.cuda.current_device())
if wrap_with_ddp:
i = torch.cuda.current_device()
model = [
torch.nn.parallel.distributed.DistributedDataParallel(
model_module,
device_ids=[i],
output_device=i,
process_group=parallel_state.get_data_parallel_group(),
)
for model_module in model
]
return model
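# Illustrative usage (a sketch; ``gpt_model_provider`` is a placeholder for a caller-supplied
# model constructor, and parallel_state is assumed to be initialized already):
#   models = build_model(
#       gpt_model_provider,
#       wrap_with_ddp=True,
#       virtual_pipeline_model_parallel_size=None,
#       model_type=ModelType.encoder_or_decoder,
#   )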
def _calc_number_of_params(model: List[torch.nn.Module]) -> int:
assert isinstance(model, list)
return sum(
[
sum([p.nelement() for p in model_module.parameters()])
for model_module in model
]
)
def _get_params_for_weight_decay_optimization(
model: Union[torch.nn.Module, List[torch.nn.Module]],
*,
no_weight_decay_modules=(FusedLayerNorm,),
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Divide params into with-weight-decay and without-weight-decay groups.
Layernorms and biases will have no weight decay but the rest will.
"""
modules = listify_model(model)
weight_decay_params = {"params": []}
no_weight_decay_params = {"params": [], "weight_decay": 0.0}
for module in modules:
for module_ in module.modules():
if isinstance(module_, no_weight_decay_modules):
no_weight_decay_params["params"].extend(
[p for p in list(module_._parameters.values()) if p is not None]
)
else:
weight_decay_params["params"].extend(
[
p
for n, p in list(module_._parameters.items())
if p is not None and n != "bias"
]
)
no_weight_decay_params["params"].extend(
[
p
for n, p in list(module_._parameters.items())
if p is not None and n == "bias"
]
)
return weight_decay_params, no_weight_decay_params
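# Illustrative use with a torch optimizer (a sketch; the optimizer choice and
# hyperparameters are example values, not prescribed by this module):
#   wd_group, no_wd_group = _get_params_for_weight_decay_optimization(model)
#   optimizer = torch.optim.AdamW([wd_group, no_wd_group], lr=1e-4, weight_decay=0.01)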
def free_output_tensor(
output_tensors: Optional[Union[torch.Tensor, Sequence[torch.Tensor]]],
deallocate_pipeline_outputs: bool = False,
) -> None:
"""Pseudo-free the output tensor's `.data` field.
This method should be called right after the output tensor has been sent to the next
pipeline stage. At this point, the output tensor is only useful for its `.grad_fn` field,
and not its `.data`.
"""
if not deallocate_pipeline_outputs:
return
if output_tensors is None:
return
if isinstance(output_tensors, torch.Tensor):
output_tensors = [output_tensors]
for output_tensor in output_tensors:
output_tensor.data = torch.cuda.FloatTensor([0])
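    # After this, each output tensor's ``.data`` points at a 1-element placeholder, but its
    # ``.grad_fn`` is preserved so ``custom_backward`` below can still traverse the autograd graph.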
def custom_backward(output: torch.Tensor, grad_output: Optional[torch.Tensor]) -> None:
"""Directly call C++ autograd engine.
To make the `free_output_tensor` optimization work, the C++ autograd engine must be called
directly, bypassing PyTorch's `torch.autograd.backward`. PyTorch's `backward` checks that the
output and grad have the same shape, while C++ `backward` does not.
"""
assert (
output.numel() == 1
), "output should be pseudo-freed in schedule, to optimize memory consumption"
assert isinstance(output, torch.Tensor), "output == {}.".format(
type(output).__name__
)
    assert isinstance(
        grad_output, (torch.Tensor, type(None))
    ), "grad_output == {}.".format(type(grad_output).__name__)
# Handle scalar output
if grad_output is None:
assert output.numel() == 1, "Implicit grad requires scalar output."
grad_output = torch.ones_like(output, memory_format=torch.preserve_format)
# Call C++ engine [ see torch/csrc/autograd/python_engine.cpp ]
Variable._execution_engine.run_backward(
tensors=(output,),
grad_tensors=(grad_output,),
keep_graph=False,
create_graph=False,
inputs=(),
allow_unreachable=True,
accumulate_grad=True,
)
def forward_step(
forward_step_func: FwdStepFunc,
batch: Optional[Batch],
model: torch.nn.Module,
input_tensor: Optional[Union[torch.Tensor, List[torch.Tensor]]],
losses_reduced: List[torch.Tensor],
dtype: torch.dtype,
disable_autocast: bool = False,
checkpoint_activations_micro_batch: Optional[bool] = None,
) -> Union[torch.Tensor, Sequence[torch.Tensor]]:
"""Forward step for passed-in model.
If first stage, input tensor is obtained from batch, otherwise passed-in input_tensor is used.
Returns output tensor.
Args:
forward_step_func: Model specific function. This takes a minibatch and model as its arguments and
returns the model's output and the loss function.
batch: minibatch
model: unwrappable model
input_tensor:
losses_reduced:
dtype:
disable_autocast:
checkpoint_activations_micro_batch:
Returns:
output_tensor
"""
# timers = get_timers()
# timers("forward-compute").start()
unwrapped_model = unwrap_model(model)
model_type = get_model_type(unwrapped_model)
# NOTE (mkozuki): The passed `model` is expected to implement `set_input_tensor`.
# See https://github.com/NVIDIA/Megatron-LM/blob/5ac5571ba0265af4c491ee0af1508ca7589450c6/megatron/model/transformer.py#L679 # NOQA
# for the details of `set_input_tensor`.
unwrap_output_tensor = not isinstance(input_tensor, list)
if unwrap_output_tensor:
input_tensor = [input_tensor]
input_tensor = [inp.get() if isinstance(inp, FutureTensor) else inp for inp in input_tensor]
unwrapped_model.set_input_tensor(input_tensor)
with torch.cuda.amp.autocast(
enabled=not disable_autocast and dtype in (torch.half, torch.bfloat16),
dtype=dtype,
):
if checkpoint_activations_micro_batch is None:
output_tensor, loss_func = forward_step_func(batch, model)
else:
output_tensor, loss_func = forward_step_func(batch, model, checkpoint_activations_micro_batch)
if parallel_state.is_pipeline_last_stage():
output_tensor = loss_func(output_tensor)
loss, loss_reduced = output_tensor
output_tensor = loss / get_num_microbatches()
losses_reduced.append(loss_reduced)
# timers("forward-compute").stop()
# If T5 model (or other model with encoder and decoder)
# and in decoder stack, then send encoder_hidden_state
# downstream as well.
if (
parallel_state.is_pipeline_stage_after_split()
and model_type == ModelType.encoder_and_decoder
):
return [output_tensor, input_tensor[-1]]
if unwrap_output_tensor:
return output_tensor
return [output_tensor]
def backward_step(
input_tensor: Optional[torch.Tensor],
output_tensor: torch.Tensor,
output_tensor_grad: Optional[torch.Tensor],
model_type: ModelType,
*,
grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
deallocate_pipeline_outputs: bool = False,
) -> Union[None, torch.Tensor, Sequence[torch.Tensor]]:
"""Backward step through passed-in output tensor.
If last stage, output_tensor_grad is None, otherwise gradient of loss
with respect to stage's output tensor.
Returns gradient of loss with respect to input tensor (None if first
stage).
Args:
input_tensor:
output_tensor:
output_tensor_grad:
Keyword Arguments:
grad_scaler:
deallocate_pipeline_outputs: Experimental.
Returns:
input_tensor_grad
"""
# timers = get_timers()
# timers("backward-compute").start()
# Retain the grad on the input_tensor.
unwrap_input_tensor_grad = not isinstance(input_tensor, list)
if unwrap_input_tensor_grad:
input_tensor = [input_tensor]
input_tensor = [inp.get() if isinstance(inp, FutureTensor) else inp for inp in input_tensor]
for x in input_tensor:
if x is not None:
x.retain_grad()
if not isinstance(output_tensor, list):
output_tensor = [output_tensor]
output_tensor = [out.get() if isinstance(out, FutureTensor) else out for out in output_tensor]
if not isinstance(output_tensor_grad, list):
output_tensor_grad = [output_tensor_grad]
output_tensor_grad = [ogr.get() if isinstance(ogr, FutureTensor) else ogr for ogr in output_tensor_grad]
# Backward pass.
if grad_scaler is not None and output_tensor_grad[0] is None:
output_tensor[0] = grad_scaler.scale(output_tensor[0])
if deallocate_pipeline_outputs:
custom_backward(output_tensor[0], output_tensor_grad[0])
else:
torch.autograd.backward(output_tensor[0], grad_tensors=output_tensor_grad[0])
# Collect the grad of the input_tensor.
input_tensor_grad = [None]
if input_tensor is not None:
input_tensor_grad = []
for x in input_tensor:
input_tensor_grad.append(None if x is None else x.grad)
# Handle single skip connection if it exists (encoder_hidden_state in model with encoder and decoder).
if (
parallel_state.get_pipeline_model_parallel_world_size() > 1
and parallel_state.is_pipeline_stage_after_split()
and model_type == ModelType.encoder_and_decoder
):
if output_tensor_grad[1] is not None:
# todo (mkozuki): Replace the inplace add with `+= output_tensor_grad[1]`?
input_tensor_grad[-1].add_(output_tensor_grad[1])
# timers("backward-compute").stop()
return input_tensor_grad[0] if unwrap_input_tensor_grad else input_tensor_grad
| GeneSplice-main | GeneSplice/apex/apex/transformer/pipeline_parallel/schedules/common.py |
import contextlib
from typing import Any, Callable, List, Optional, Sequence, Union
import warnings
import torch
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.pipeline_parallel.schedules.common import Batch
from apex.transformer.pipeline_parallel.schedules.common import FwdStepFunc
from apex.transformer.pipeline_parallel.schedules.common import backward_step
from apex.transformer.pipeline_parallel.schedules.common import forward_step
from apex.transformer.pipeline_parallel.schedules.common import free_output_tensor
from apex.transformer.pipeline_parallel.utils import get_kth_microbatch
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import get_model_type
from apex.transformer.log_util import get_transformer_logger
__all__ = ["_forward_backward_pipelining_with_interleaving"]
_logger = get_transformer_logger(__name__)
# TODO(mkozuki): Reduce cyclomatic complexity
def _forward_backward_pipelining_with_interleaving(
forward_step_func: FwdStepFunc,
batch: List[Optional[Batch]],
model: List[torch.nn.Module],
*,
forward_only: bool,
tensor_shape: Optional[Union[List[int], torch.Size]] = None,
dtype: Optional[torch.dtype] = None,
grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
disable_autocast: bool = False,
deallocate_pipeline_outputs: bool = False,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
custom_sync_context_handler: Optional[Callable] = None,
custom_grad_sync_func: Optional[Callable] = None,
custom_param_sync_func: Optional[Callable] = None,
sync_batch_comm: bool = True,
num_micro_batches_with_partial_activation_checkpoints: Optional[int] = None,
overlap_p2p_comm: bool = False,
batch_p2p_comm: bool = True,
**kwargs,
) -> List[Union[torch.Tensor, Sequence[torch.Tensor]]]:
"""Run interleaved 1F1B schedule with communication between pipeline stages as needed.
    This function assumes `batch` and `model` are a list of `Batch`es and a list of `torch.nn.Module`s, respectively.
This means that model is split into model chunks.
This pipeline parallel scheduling consists of three steps:
1. warmup
2. 1F1B a.k.a. steady state
3. cooldown
Note that if `forward_only` this scheduling consists of only warmup phase.
Args:
forward_step_func: A function which takes a minibatch and model as its arguments and
returns model's forward output and the loss function.
The loss function is supposed to take one `torch.Tensor` and
return a `torch.Tensor` of loss and a dictionary of `str` and `torch.Tensor`.
batch: A minibatch, i.e., a list of `torch.Tensor`'s.
model: A `torch.nn.Module` or a list of `torch.nn.Module`.
Keyword args:
forward_only:
tensor_shape: Shape of tensor. The tensor is expected to be 3D and its order of dimension
is supposed to be ``(sequence, batch, hidden)``.
dtype: dtype used in p2p communication. If ``None`` (default value),
torch.float32 will be used even if ``autocast`` is enabled.
grad_scaler:
disable_autocast:
deallocate_pipeline_outputs: If :obj:`True`, free the data of the output tensor of
each pipeline stage. Experimental.
sequence_parallel_enabled: Set to :obj:`True` for this function to handle sequence length.
When :obj:`True`, the sequence length on each tensor model parallel rank is updated
to :math:`original\_sequence\_length / tensor\_model\_parallel\_world\_size`.
custom_sync_context_handler: If provided, this is treated as a
function to construct a context manager to disable
asynchronous gradient reductions. Asynchronous gradient
reductions are only enabled in the final backward pass of
each model chunk.
custom_grad_sync_func: If provided, this is treated as a
function to launch asynchronous gradient reductions (e.g.
reduce-scatters with distributed optimizer). The function
should take one positional argument: a list of parameters
whose gradients should be synchronized. Asynchronous
gradient reductions are launched after the final backward
pass of each model chunk.
custom_param_sync_func: If provided, this is treated as a
function to launch asynchronous parameter synchronizations
(e.g. all-gathers with distributed optimizer). The
function should take one positional argument: a list of
parameters whose values should be synchronized.
Asynchronous parameter synchronizations are launched
before the first forward pass of each model chunk.
        sync_batch_comm: If :obj:`False`, skip the CUDA synchronization after the batched communication.
            Disabling this requires the fix in https://github.com/pytorch/pytorch/pull/82450.
        num_micro_batches_with_partial_activation_checkpoints: If an :obj:`int`, the number of
            micro-batches that checkpoint the activations of only a subset of the Transformer layers.
            The remaining micro-batches within the window of maximum outstanding micro-batch
            backpropagations checkpoint all Transformer layers.
        overlap_p2p_comm: If :obj:`True`, return CUDA wait handles to the scheduler instead of completing
            the communication within the p2p transfer API instance. The scheduler manages the communication
            completion so that it overlaps with computation.
        batch_p2p_comm: If :obj:`True`, use the batched send and receive API to conduct a collection of
            send and receive operations between peers. If :obj:`False`, conduct each send and receive
            operation individually.
Returns:
a list of loss `torch.Tensor`s if the last stage, empty list otherwise.
"""
if not isinstance(model, list):
raise RuntimeError("`model` must be a list of `nn.Module`'s'")
if deallocate_pipeline_outputs:
warnings.warn(
"`deallocate_pipeline_outputs` is experimental and subject to change. "
"This option is not recommended."
)
# Construct helper functions for async grad reductions
if custom_sync_context_handler is not None:
sync_context_handler = custom_sync_context_handler
else:
sync_context_handler = contextlib.nullcontext
sync_context = None
def disable_grad_sync():
"""Disable asynchronous grad reductions"""
nonlocal sync_context
if sync_context is None:
sync_context = sync_context_handler()
sync_context.__enter__()
def enable_grad_sync():
"""Enable asynchronous grad reductions"""
nonlocal sync_context
if sync_context is not None:
sync_context.__exit__(None, None, None)
sync_context = None
disable_grad_sync()
# mypy will blame the following if statement
if sequence_parallel_enabled:
seq_length, batch_size, hidden = tensor_shape
tensor_shape = (
seq_length // parallel_state.get_tensor_model_parallel_world_size(),
batch_size,
hidden,
)
num_model_chunks: int = len(model)
input_tensors: List[List[Union[None, torch.Tensor]]] = [
[] for _ in range(num_model_chunks)
]
output_tensors: List[List[Union[None, torch.Tensor]]] = [
[] for _ in range(num_model_chunks)
]
curr_iters: List[int] = [0 for _ in range(num_model_chunks)]
losses_reduced: List[Union[None, torch.Tensor]] = []
if not forward_only:
output_tensor_grads: List[List[Union[None, torch.Tensor]]] = [
[] for _ in range(num_model_chunks)
]
pipeline_parallel_size: int = parallel_state.get_pipeline_model_parallel_world_size()
pipeline_parallel_rank: int = parallel_state.get_pipeline_model_parallel_rank()
# Compute number of warmup and remaining microbatches.
num_microbatches: int = get_num_microbatches() * num_model_chunks
all_warmup_microbatches: bool = False
if forward_only:
num_warmup_microbatches: int = num_microbatches
else:
        # Run all forward passes and then all backward passes if number of
        # microbatches is just the number of pipeline stages.
        # Otherwise, perform (num_model_chunks-1)*pipeline_parallel_size warmup
        # microbatches on all workers, followed by additional microbatches depending
        # on the pipeline stage ID (more forward passes for earlier stages; later
        # stages can immediately start with 1F1B).
if get_num_microbatches() == pipeline_parallel_size:
num_warmup_microbatches = num_microbatches
all_warmup_microbatches = True
else:
num_warmup_microbatches = (
pipeline_parallel_size - pipeline_parallel_rank - 1
) * 2
num_warmup_microbatches += (num_model_chunks - 1) * pipeline_parallel_size
num_warmup_microbatches = min(num_warmup_microbatches, num_microbatches)
num_microbatches_remaining: int = num_microbatches - num_warmup_microbatches
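    # Illustrative example: with pipeline_parallel_size=4, num_model_chunks=2 and
    # get_num_microbatches()=8 (16 microbatches in total), rank 0 runs
    # (4-0-1)*2 + (2-1)*4 = 10 warmup microbatches and 6 in the steady state,
    # while rank 3 runs 4 warmup microbatches and 12 in the steady state.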
    # Checkpoint the activations of only a subset of Transformer layers in a number of micro-batches
    # within the window of maximum outstanding micro-batch backpropagations.
    # Micro-batches with ids less than 'num_micro_batches_with_partial_activation_checkpoints'
    # checkpoint only a subset of Transformer layers (or skip checkpointing), while
    # the remaining micro-batches within the window checkpoint all Transformer layers.
    # The window of micro-batches is set by the maximum number of outstanding
    # backpropagations and becomes smaller at later pipeline stages.
    # See Appendix C of https://arxiv.org/pdf/2205.05198.pdf.
max_outstanding_backprops = None
if num_micro_batches_with_partial_activation_checkpoints is not None:
max_outstanding_backprops = num_warmup_microbatches + 1
_logger.info(
f"num_microbatches: {num_microbatches}, "
f"num_warmup_microbatches: {num_warmup_microbatches}, "
f"num_microbatches_remaining: {num_microbatches_remaining}"
)
# Synchronize params for first two model chunks
if custom_param_sync_func is not None:
custom_param_sync_func(model[0].parameters())
custom_param_sync_func(model[1].parameters())
###################################################################################################################
# Helper function definitions.
###################################################################################################################
def get_model_chunk_id(microbatch_id: int, forward: bool) -> int:
"""Helper function to get the model chunk ID given the iteration number.
Each model chunk processes pipeline_parallel_size microbatches
at a time. We assume that the number of microbatches is a
multiple of pipeline_parallel_size*num_model_chunks.
"""
microbatch_group_size = pipeline_parallel_size * num_model_chunks
microbatch_id_in_group = microbatch_id % microbatch_group_size
model_chunk_id = microbatch_id_in_group // pipeline_parallel_size
if not forward:
model_chunk_id = num_model_chunks - model_chunk_id - 1
return model_chunk_id
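    # Illustrative example: with pipeline_parallel_size=4 and num_model_chunks=2
    # (microbatch group size 8), forward microbatch ids 0-3 map to chunk 0, ids 4-7 to
    # chunk 1, and ids 8-11 back to chunk 0; the backward mapping is mirrored, so
    # backward ids 0-3 map to chunk 1 and ids 4-7 to chunk 0.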
def is_first_microbatch_for_model_chunk(microbatch_id: int) -> bool:
"""Helper function to check if an iteration is the first for a model
chunk.
"""
microbatch_group_size = pipeline_parallel_size * num_model_chunks
microbatch_group_id = microbatch_id // microbatch_group_size
microbatch_id_in_group = microbatch_id % microbatch_group_size
if microbatch_group_id == 0:
return microbatch_id_in_group % pipeline_parallel_size == 0
else:
return False
def is_last_microbatch_for_model_chunk(microbatch_id: int) -> bool:
"""Helper function to check if an iteration is the last for a model
chunk.
"""
microbatch_group_size = pipeline_parallel_size * num_model_chunks
num_microbatch_groups = num_microbatches // microbatch_group_size
microbatch_group_id = microbatch_id // microbatch_group_size
microbatch_id_in_group = microbatch_id % microbatch_group_size
if microbatch_group_id == num_microbatch_groups - 1:
return microbatch_id_in_group % pipeline_parallel_size == pipeline_parallel_size - 1
else:
return False
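    # Illustrative example: with pipeline_parallel_size=4, num_model_chunks=2 and 16 microbatches,
    # is_first_microbatch_for_model_chunk returns True for ids 0 and 4, and
    # is_last_microbatch_for_model_chunk returns True for ids 11 and 15.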
def forward_step_helper(
microbatch_id: int,
curr_iters: List[int],
checkpoint_activations_micro_batch: Optional[bool] = None,
) -> torch.Tensor:
"""Helper method to run forward step with model split into chunks
(run set_virtual_pipeline_model_parallel_rank() before calling forward_step()).
"""
model_chunk_id = get_model_chunk_id(microbatch_id, forward=True)
parallel_state.set_virtual_pipeline_model_parallel_rank(model_chunk_id)
# launch param synchronization for next model chunk
# Note: To achieve maximum performance, pipeline parallelism
# assumes all ranks have the same compute time. However,
# asynchronous communication tends to slow down compute. Thus,
# we launch asynchronous communication at the same time across
# the pipeline-parallel group.
if custom_param_sync_func is not None:
param_sync_microbatch_id = microbatch_id + pipeline_parallel_rank
if param_sync_microbatch_id < num_microbatches and is_first_microbatch_for_model_chunk(param_sync_microbatch_id):
param_sync_chunk_id = get_model_chunk_id(param_sync_microbatch_id, forward=True) + 1
if 1 < param_sync_chunk_id < num_model_chunks:
custom_param_sync_func(model[param_sync_chunk_id].parameters())
# forward step
if parallel_state.is_pipeline_first_stage() and len(
input_tensors[model_chunk_id]
) == len(output_tensors[model_chunk_id]):
input_tensors[model_chunk_id].append(None)
input_tensor = input_tensors[model_chunk_id][-1]
output_tensor = forward_step(
forward_step_func,
get_kth_microbatch(batch, curr_iters[model_chunk_id]),
model[model_chunk_id],
input_tensor,
losses_reduced,
dtype,
disable_autocast,
checkpoint_activations_micro_batch,
)
curr_iters[model_chunk_id] += 1
output_tensors[model_chunk_id].append(output_tensor)
# if forward-only, no need to save tensors for a backward pass
if forward_only:
input_tensors[model_chunk_id].pop()
output_tensors[model_chunk_id].pop()
return output_tensor
def backward_step_helper(microbatch_id: int) -> torch.Tensor:
"""Helper method to run backward step with model split into chunks
(run set_virtual_pipeline_model_parallel_rank() before calling backward_step()).
"""
model_chunk_id = get_model_chunk_id(microbatch_id, forward=False)
model_type = get_model_type(model[model_chunk_id])
parallel_state.set_virtual_pipeline_model_parallel_rank(model_chunk_id)
# launch grad synchronization (default)
if custom_grad_sync_func is None and is_last_microbatch_for_model_chunk(microbatch_id):
enable_grad_sync()
# backward step
if parallel_state.is_pipeline_last_stage():
if len(output_tensor_grads[model_chunk_id]) == 0:
output_tensor_grads[model_chunk_id].append(None)
input_tensor = input_tensors[model_chunk_id].pop(0)
output_tensor = output_tensors[model_chunk_id].pop(0)
output_tensor_grad = output_tensor_grads[model_chunk_id].pop(0)
input_tensor_grad = backward_step(
input_tensor,
output_tensor,
output_tensor_grad,
model_type=model_type,
grad_scaler=grad_scaler,
deallocate_pipeline_outputs=deallocate_pipeline_outputs,
)
# launch grad synchronization (custom grad sync)
# Note: To achieve maximum performance, pipeline parallelism
# assumes all ranks have the same compute time. However,
# asynchronous communication tends to slow down compute. Thus,
# we launch asynchronous communication at the same time across
# the pipeline-parallel group.
if custom_grad_sync_func is not None:
grad_sync_microbatch_id = microbatch_id - pipeline_parallel_rank
if grad_sync_microbatch_id >= 0 and is_last_microbatch_for_model_chunk(grad_sync_microbatch_id):
grad_sync_chunk_id = get_model_chunk_id(grad_sync_microbatch_id, forward=False)
enable_grad_sync()
custom_grad_sync_func(model[grad_sync_chunk_id].parameters())
disable_grad_sync()
return input_tensor_grad
###################################################################################################################
# Run warmup forward passes.
###################################################################################################################
fwd_wait_handles, bwd_wait_handles = None, None
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
input_tensors[0].append(
p2p_communication.recv_forward(
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
)
_logger.info("Warmup phase")
for k in range(num_warmup_microbatches):
_logger.debug(f"warmup iter: {k} / {num_warmup_microbatches}")
# Decide to checkpoint all layers' activations of the current micro-batch
if max_outstanding_backprops is not None:
checkpoint_activations_micro_batch = k % max_outstanding_backprops >= \
num_micro_batches_with_partial_activation_checkpoints
else:
checkpoint_activations_micro_batch = None
if fwd_wait_handles is not None:
for wait_handle in fwd_wait_handles:
wait_handle.wait()
output_tensor = forward_step_helper(k, curr_iters, checkpoint_activations_micro_batch)
# Determine if tensor should be received from previous stage.
next_forward_model_chunk_id = get_model_chunk_id(k + 1, forward=True)
recv_prev = True
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
if next_forward_model_chunk_id == 0:
recv_prev = False
if k == (num_microbatches - 1):
recv_prev = False
_logger.debug(
f"next fwd model chunk ID: {next_forward_model_chunk_id}, recv_prev: {recv_prev}"
)
# Don't send tensor downstream if on last stage.
if parallel_state.is_pipeline_last_stage():
_logger.debug("Pipeline last stage, not sending tensor downstream")
output_tensor = None
if overlap_p2p_comm:
            # P2P communications in warmup are not overlapped with compute. We split the P2P
            # communications for the activation forward and the activation-gradient backward in warmup
            # to match the send/recv API granularity used in 1F1B when the batched send/recv API is used.
# Send and receive tensors as appropriate (send tensors computed
# in this iteration; receive tensors for next iteration).
_logger.debug("send fwd and receive fwd")
input_tensor, fwd_wait_handles = p2p_communication.send_forward_recv_forward(
output_tensor,
recv_prev=recv_prev,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=True,
batch_p2p_comm=batch_p2p_comm,
)
if (
k == (num_warmup_microbatches - 1)
and not forward_only
and not all_warmup_microbatches
):
input_tensor_grad = None
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
recv_next = False
_logger.debug("send bwd and receive bwd")
output_tensor_grad, bwd_wait_handles = p2p_communication.send_backward_recv_backward(
input_tensor_grad,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=True,
batch_p2p_comm=batch_p2p_comm,
)
output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)
input_tensors[next_forward_model_chunk_id].append(input_tensor)
else:
# Send and receive tensors as appropriate (send tensors computed
# in this iteration; receive tensors for next iteration).
if (
k == (num_warmup_microbatches - 1)
and not forward_only
and not all_warmup_microbatches
):
input_tensor_grad = None
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
recv_next = False
_logger.debug("send fwd&bwd and receive fwd&bwd")
(
input_tensor,
output_tensor_grad,
) = p2p_communication.send_forward_backward_recv_forward_backward(
output_tensor,
input_tensor_grad,
recv_prev=recv_prev,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)
else:
_logger.debug("send fwd and receive fwd")
input_tensor = p2p_communication.send_forward_recv_forward(
output_tensor,
recv_prev=recv_prev,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
input_tensors[next_forward_model_chunk_id].append(input_tensor)
free_output_tensor(output_tensor, deallocate_pipeline_outputs)
###################################################################################################################
# Run 1F1B in steady state.
###################################################################################################################
_logger.info("Steady phase")
for k in range(num_microbatches_remaining):
# Forward pass.
_logger.debug(f" steady phase iter {k} / {num_microbatches_remaining}")
forward_k = k + num_warmup_microbatches
# Decide to checkpoint all layers' activations of the current micro-batch
if max_outstanding_backprops is not None:
checkpoint_activations_micro_batch = (
forward_k % max_outstanding_backprops >= num_micro_batches_with_partial_activation_checkpoints
)
else:
checkpoint_activations_micro_batch = None
if overlap_p2p_comm:
if fwd_wait_handles is not None:
for wait_handle in fwd_wait_handles:
wait_handle.wait()
output_tensor = forward_step_helper(forward_k, curr_iters, checkpoint_activations_micro_batch)
# Set forward model chunk id
forward_model_chunk_id = get_model_chunk_id(forward_k, forward=True)
parallel_state.set_virtual_pipeline_model_parallel_rank(forward_model_chunk_id)
            # Last virtual stage has no activation tensor to send
if parallel_state.is_pipeline_last_stage():
output_tensor = None
# Determine if the current virtual stage has an activation tensor to receive
recv_prev = True
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
# First stage is ahead of last stage by (pipeline_parallel_size - 1).
next_forward_model_chunk_id = get_model_chunk_id(
forward_k - (pipeline_parallel_size - 1), forward=True
)
if next_forward_model_chunk_id == (num_model_chunks - 1):
recv_prev = False
next_forward_model_chunk_id += 1
else:
next_forward_model_chunk_id = get_model_chunk_id(
forward_k + 1, forward=True
)
# If last iteration, don't receive; we already received one extra
# before the start of the for loop.
if k == (num_microbatches_remaining - 1):
recv_prev = False
# Send activation tensor to the next stage and receive activation tensor from the
# previous stage
_logger.debug("send fwd and receive fwd")
input_tensor, fwd_wait_handles = p2p_communication.send_forward_recv_forward(
output_tensor,
recv_prev=recv_prev,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=True,
batch_p2p_comm=batch_p2p_comm,
)
if bwd_wait_handles is not None:
for wait_handle in bwd_wait_handles:
wait_handle.wait()
# Backward pass.
backward_k = k
input_tensor_grad = backward_step_helper(backward_k)
# Set backward model chunk id
backward_model_chunk_id = get_model_chunk_id(backward_k, forward=False)
parallel_state.set_virtual_pipeline_model_parallel_rank(backward_model_chunk_id)
_logger.debug(
f"fwd/bwd model chunk id: {forward_model_chunk_id}/{backward_model_chunk_id}"
)
            # First virtual stage has no activation gradient tensor to send
if parallel_state.is_pipeline_first_stage():
input_tensor_grad = None
# Determine if the current virtual stage has an activation gradient tensor to receive
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
# Last stage is ahead of first stage by (pipeline_parallel_size - 1).
next_backward_model_chunk_id = get_model_chunk_id(
backward_k - (pipeline_parallel_size - 1), forward=False
)
if next_backward_model_chunk_id == 0:
recv_next = False
next_backward_model_chunk_id -= 1
else:
next_backward_model_chunk_id = get_model_chunk_id(
backward_k + 1, forward=False
)
            # Send activation-gradient tensor to the previous stage and receive the
            # activation-gradient tensor from the next stage
_logger.debug("send bwd and receive bwd")
output_tensor_grad, bwd_wait_handles = p2p_communication.send_backward_recv_backward(
input_tensor_grad,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=True,
batch_p2p_comm=batch_p2p_comm,
)
else:
output_tensor = forward_step_helper(forward_k, curr_iters, checkpoint_activations_micro_batch)
# Backward pass.
backward_k = k
input_tensor_grad = backward_step_helper(backward_k)
# Send output_tensor and input_tensor_grad, receive input_tensor
# and output_tensor_grad.
# Determine if current stage has anything to send in either direction,
# otherwise set tensor to None.
forward_model_chunk_id = get_model_chunk_id(forward_k, forward=True)
parallel_state.set_virtual_pipeline_model_parallel_rank(forward_model_chunk_id)
if parallel_state.is_pipeline_last_stage():
output_tensor = None
backward_model_chunk_id = get_model_chunk_id(backward_k, forward=False)
parallel_state.set_virtual_pipeline_model_parallel_rank(backward_model_chunk_id)
_logger.debug(
f"fwd/bwd model chunk id: {forward_model_chunk_id}/{backward_model_chunk_id}"
)
if parallel_state.is_pipeline_first_stage():
input_tensor_grad = None
# Determine if peers are sending, and where in data structure to put
# received tensors.
recv_prev = True
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
# First stage is ahead of last stage by (pipeline_parallel_size - 1).
next_forward_model_chunk_id = get_model_chunk_id(
forward_k - (pipeline_parallel_size - 1), forward=True
)
if next_forward_model_chunk_id == (num_model_chunks - 1):
recv_prev = False
next_forward_model_chunk_id += 1
else:
next_forward_model_chunk_id = get_model_chunk_id(
forward_k + 1, forward=True
)
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
# Last stage is ahead of first stage by (pipeline_parallel_size - 1).
next_backward_model_chunk_id = get_model_chunk_id(
backward_k - (pipeline_parallel_size - 1), forward=False
)
if next_backward_model_chunk_id == 0:
recv_next = False
next_backward_model_chunk_id -= 1
else:
next_backward_model_chunk_id = get_model_chunk_id(
backward_k + 1, forward=False
)
# If last iteration, don't receive; we already received one extra
# before the start of the for loop.
if k == (num_microbatches_remaining - 1):
recv_prev = False
# Communicate tensors.
_logger.debug("send fwd&bwd and receive fwd&bwd")
(
input_tensor,
output_tensor_grad,
) = p2p_communication.send_forward_backward_recv_forward_backward(
output_tensor,
input_tensor_grad,
recv_prev=recv_prev,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
free_output_tensor(output_tensor, deallocate_pipeline_outputs)
# Put input_tensor and output_tensor_grad in data structures in the
# right location.
if recv_prev:
input_tensors[next_forward_model_chunk_id].append(input_tensor)
if recv_next:
output_tensor_grads[next_backward_model_chunk_id].append(output_tensor_grad)
###################################################################################################################
# Run cooldown backward passes (flush out pipeline).
###################################################################################################################
_logger.info("Cooldown phase")
if not forward_only:
if overlap_p2p_comm and bwd_wait_handles is not None:
for wait_handle in bwd_wait_handles:
wait_handle.wait()
if all_warmup_microbatches:
output_tensor_grads[num_model_chunks - 1].append(
p2p_communication.recv_backward(
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
)
for k in range(num_microbatches_remaining, num_microbatches):
_logger.debug(
f"cooldown iter {k} in range({num_microbatches_remaining}, {num_microbatches})"
)
input_tensor_grad = backward_step_helper(k)
next_backward_model_chunk_id = get_model_chunk_id(k + 1, forward=False)
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
if next_backward_model_chunk_id == (num_model_chunks - 1):
recv_next = False
if k == (num_microbatches - 1):
recv_next = False
output_tensor_grads[next_backward_model_chunk_id].append(
p2p_communication.send_backward_recv_backward(
input_tensor_grad,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
)
# Make sure to exit context handler for async grad reductions
enable_grad_sync()
return losses_reduced
| GeneSplice-main | GeneSplice/apex/apex/transformer/pipeline_parallel/schedules/fwd_bwd_pipelining_with_interleaving.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron arguments."""
import argparse
import os
import torch
def parse_args(extra_args_provider=None, defaults={}, override_args={},
ignore_unknown_args=False):
"""Parse all arguments."""
parser = argparse.ArgumentParser(description='Megatron-LM Arguments',
allow_abbrev=False)
# Standard arguments.
parser = _add_network_size_args(parser)
parser = _add_regularization_args(parser)
parser = _add_training_args(parser)
parser = _add_initialization_args(parser)
parser = _add_learning_rate_args(parser)
parser = _add_checkpointing_args(parser)
parser = _add_mixed_precision_args(parser)
parser = _add_distributed_args(parser)
parser = _add_validation_args(parser)
parser = _add_data_args(parser)
parser = _add_autoresume_args(parser)
parser = _add_biencoder_args(parser)
parser = _add_vision_args(parser)
parser = _add_logging_args(parser)
# NOTE(mkozuki): This option is added to investigate the potential of `torch.autograd.graph.save_on_cpu()`.
# ref: https://pytorch.org/docs/stable/autograd.html#torch.autograd.graph.save_on_cpu.
parser.add_argument('--cpu-offload', action='store_true', default=False, help='Turns on CPU offloading')
# Custom arguments.
if extra_args_provider is not None:
parser = extra_args_provider(parser)
# Parse.
if ignore_unknown_args:
args, _ = parser.parse_known_args()
else:
args = parser.parse_args()
# Distributed args.
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
for key in override_args:
setattr(args, key, override_args[key])
# Tensor model parallel size.
args.tensor_model_parallel_size = min(
args.tensor_model_parallel_size, args.world_size)
assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\
' ({}) is not divisible by tensor model parallel size ({})'.format(
args.world_size, args.tensor_model_parallel_size)
# Pipeline model parallel size.
args.pipeline_model_parallel_size = min(
args.pipeline_model_parallel_size,
(args.world_size // args.tensor_model_parallel_size))
args.transformer_pipeline_model_parallel_size = (
args.pipeline_model_parallel_size - 1
if args.standalone_embedding_stage else
args.pipeline_model_parallel_size
)
# Checks.
model_parallel_size = args.pipeline_model_parallel_size * \
args.tensor_model_parallel_size
assert args.world_size % model_parallel_size == 0, 'world size ({}) is not'\
' divisible by tensor parallel size ({}) times pipeline parallel ' \
'size ({})'.format(args.world_size, args.tensor_model_parallel_size,
args.pipeline_model_parallel_size)
args.data_parallel_size = args.world_size // model_parallel_size
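# Worked example (hypothetical sizes): world_size=16, tensor_model_parallel_size=2,
# pipeline_model_parallel_size=4 gives model_parallel_size=8 and therefore
# data_parallel_size = 16 // 8 = 2.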
if args.rank == 0:
print('using world size: {}, data-parallel-size: {}, '
'tensor-model-parallel size: {}, '
'pipeline-model-parallel size: {} '.format(
args.world_size, args.data_parallel_size,
args.tensor_model_parallel_size,
args.pipeline_model_parallel_size), flush=True)
if args.pipeline_model_parallel_size > 1:
if args.pipeline_model_parallel_split_rank is not None:
assert args.pipeline_model_parallel_split_rank < \
args.pipeline_model_parallel_size, 'split rank needs'\
' to be less than pipeline model parallel size ({})'.format(
args.pipeline_model_parallel_size)
# Deprecated arguments
assert args.batch_size is None, '--batch-size argument is no longer ' \
'valid, use --micro-batch-size instead'
del args.batch_size
assert args.warmup is None, '--warmup argument is no longer valid, use ' \
'--lr-warmup-fraction instead'
del args.warmup
assert args.model_parallel_size is None, '--model-parallel-size is no ' \
'longer valid, use --tensor-model-parallel-size instead'
del args.model_parallel_size
if args.checkpoint_activations:
args.recompute_granularity = 'full'
args.recompute_method = 'uniform'
if args.rank == 0:
print('--checkpoint-activations is no longer valid, '
'use --recompute-granularity and --recompute-method instead. '
'Defaulting to recompute-granularity=full and recompute-method=uniform.')
del args.checkpoint_activations
if args.recompute_activations:
args.recompute_granularity = 'selective'
del args.recompute_activations
# Set input defaults.
for key in defaults:
# For default to be valid, it should not be provided in the
# arguments that are passed to the program. We check this by
# ensuring the arg is set to None.
if getattr(args, key) is not None:
if args.rank == 0:
print('WARNING: overriding default arguments for {key}:{v} \
with {key}:{v2}'.format(key=key, v=defaults[key],
v2=getattr(args, key)),
flush=True)
else:
setattr(args, key, defaults[key])
# Batch size.
assert args.micro_batch_size is not None
assert args.micro_batch_size > 0
if args.global_batch_size is None:
args.global_batch_size = args.micro_batch_size * args.data_parallel_size
if args.rank == 0:
print('setting global batch size to {}'.format(
args.global_batch_size), flush=True)
assert args.global_batch_size > 0
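# Worked example (hypothetical sizes): micro_batch_size=4 and data_parallel_size=2
# default the global batch size to 4 * 2 = 8, i.e. one micro-batch per
# data-parallel rank per step.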
if args.num_layers_per_virtual_pipeline_stage is not None:
assert args.pipeline_model_parallel_size > 2, \
'pipeline-model-parallel size should be greater than 2 with ' \
'interleaved schedule'
assert args.num_layers % args.num_layers_per_virtual_pipeline_stage == 0, \
'number of layers is not divisible by number of layers per virtual ' \
'pipeline stage'
args.virtual_pipeline_model_parallel_size = \
(args.num_layers // args.pipeline_model_parallel_size) // \
args.num_layers_per_virtual_pipeline_stage
else:
args.virtual_pipeline_model_parallel_size = None
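# Worked example (hypothetical sizes): num_layers=24, pipeline_model_parallel_size=4,
# num_layers_per_virtual_pipeline_stage=3 gives (24 // 4) // 3 = 2, i.e. two model
# chunks (virtual pipeline stages) per pipeline rank.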
# Parameters dtype.
args.params_dtype = torch.float
if args.fp16:
assert not args.bf16
args.params_dtype = torch.half
if args.bf16:
assert not args.fp16
args.params_dtype = torch.bfloat16
# bfloat16 requires gradient accumulation and all-reduce to
# be done in fp32.
if not args.accumulate_allreduce_grads_in_fp32:
args.accumulate_allreduce_grads_in_fp32 = True
if args.rank == 0:
print('accumulate and all-reduce gradients in fp32 for '
'bfloat16 data type.', flush=True)
if args.rank == 0:
print('using {} for parameters ...'.format(args.params_dtype),
flush=True)
# If we do accumulation and all-reduces in fp32, we need to have local DDP
# and we should make sure use-contiguous-buffers-in-local-ddp is not off.
if args.accumulate_allreduce_grads_in_fp32:
assert args.DDP_impl == 'local'
assert args.use_contiguous_buffers_in_local_ddp
else:
if args.gradient_accumulation_fusion:
args.gradient_accumulation_fusion = False
if args.rank == 0:
print('Gradient accumulation fusion to linear layer weight '
'gradient computation is supported only with fp32 '
'gradient accumulation. Setting gradient_accumulation_fusion '
'to False', flush=True)
# For torch DDP, we do not use contiguous buffer
if args.DDP_impl == 'torch':
args.use_contiguous_buffers_in_local_ddp = False
if args.dataloader_type is None:
args.dataloader_type = 'single'
# Consumed tokens.
args.consumed_train_samples = 0
args.consumed_valid_samples = 0
# Iteration-based training.
if args.train_iters:
# If we use iteration-based training, make sure the
# sample-based options are off.
assert args.train_samples is None, \
'expected iteration-based training'
assert args.lr_decay_samples is None, \
'expected iteration-based learning rate decay'
assert args.lr_warmup_samples == 0, \
'expected iteration-based learning rate warmup'
assert args.rampup_batch_size is None, \
'expected no batch-size rampup for iteration-based training'
if args.lr_warmup_fraction is not None:
assert args.lr_warmup_iters == 0, \
'can only specify one of lr-warmup-fraction and lr-warmup-iters'
# Sample-based training.
if args.train_samples:
# If we use sample-based training, make sure the
# iteration-based options are off.
assert args.train_iters is None, \
'expected sample-based training'
assert args.lr_decay_iters is None, \
'expected sample-based learning rate decay'
assert args.lr_warmup_iters == 0, \
'expected sample-based learning rate warmup'
if args.lr_warmup_fraction is not None:
assert args.lr_warmup_samples == 0, \
'can only specify one of lr-warmup-fraction ' \
'and lr-warmup-samples'
# Check required arguments.
required_args = ['num_layers', 'hidden_size', 'num_attention_heads',
'max_position_embeddings']
for req_arg in required_args:
_check_arg_is_not_none(args, req_arg)
# Checks.
if args.ffn_hidden_size is None:
args.ffn_hidden_size = 4 * args.hidden_size
if args.kv_channels is None:
assert args.hidden_size % args.num_attention_heads == 0
args.kv_channels = args.hidden_size // args.num_attention_heads
if args.seq_length is not None:
assert args.encoder_seq_length is None
args.encoder_seq_length = args.seq_length
else:
assert args.encoder_seq_length is not None
args.seq_length = args.encoder_seq_length
if args.seq_length is not None:
assert args.max_position_embeddings >= args.seq_length
if args.decoder_seq_length is not None:
assert args.max_position_embeddings >= args.decoder_seq_length
if args.lr is not None:
assert args.min_lr <= args.lr
if args.save is not None:
assert args.save_interval is not None
# Mixed precision checks.
if args.fp16_lm_cross_entropy:
assert args.fp16, 'lm cross entropy in fp16 is only supported in fp16 mode.'
if args.fp32_residual_connection:
assert args.fp16 or args.bf16, \
'residual connection in fp32 only supported when using fp16 or bf16.'
if args.weight_decay_incr_style == 'constant':
assert args.start_weight_decay is None
assert args.end_weight_decay is None
args.start_weight_decay = args.weight_decay
args.end_weight_decay = args.weight_decay
else:
assert args.start_weight_decay is not None
assert args.end_weight_decay is not None
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
# Persistent fused layer norm.
if TORCH_MAJOR < 1 or (TORCH_MAJOR == 1 and TORCH_MINOR < 11):
args.no_persist_layer_norm = True
if args.rank == 0:
print('Persistent fused layer norm kernel is supported from '
'pytorch v1.11 (nvidia pytorch container paired with v1.11). '
'Defaulting to no_persist_layer_norm=True')
# Activation recomputing.
if args.distribute_saved_activations:
assert args.tensor_model_parallel_size > 1, 'can distribute ' \
'recomputed activations only across tensor model ' \
'parallel groups'
assert args.recompute_granularity == 'full', \
'distributed recompute activations is only '\
'applicable to full recompute granularity'
assert args.recompute_method is not None, \
'for distributed recompute activations to work you '\
'need to use a recompute method '
assert (TORCH_MAJOR, TORCH_MINOR) >= (1, 10), \
'distributed recompute activations are supported for pytorch ' \
'v1.10 and above (Nvidia Pytorch container >= 21.07). Current ' \
'pytorch version is v%s.%s.' % (TORCH_MAJOR, TORCH_MINOR)
if args.recompute_granularity == 'selective':
assert args.recompute_method is None, \
'recompute method is not yet supported for ' \
'selective recomputing granularity'
# disable async_tensor_model_parallel_allreduce when
# model parallel memory optimization is enabled
if args.sequence_parallel:
args.async_tensor_model_parallel_allreduce = False
_print_args(args)
return args
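# Minimal usage sketch (illustrative; assumes the required network-size flags such as
# --num-layers and --hidden-size are supplied on the command line, and the values
# below are placeholders rather than recommended settings):
#
#   args = parse_args(defaults={'seq_length': 512}, ignore_unknown_args=True)
#   print(args.tensor_model_parallel_size, args.global_batch_size)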
def _print_args(args):
"""Print arguments."""
if args.rank == 0:
print('------------------------ arguments ------------------------',
flush=True)
str_list = []
for arg in vars(args):
dots = '.' * (48 - len(arg))
str_list.append(' {} {} {}'.format(arg, dots, getattr(args, arg)))
for arg in sorted(str_list, key=lambda x: x.lower()):
print(arg, flush=True)
print('-------------------- end of arguments ---------------------',
flush=True)
def _check_arg_is_not_none(args, arg):
assert getattr(args, arg) is not None, '{} argument is None'.format(arg)
def _add_inference_args(parser):
group = parser.add_argument_group(title='inference')
group.add_argument('--inference-batch-times-seqlen-threshold',
type=int, default=512,
help='During inference, if batch-size times '
'sequence-length is smaller than this threshold '
'then we will not use pipelining, otherwise we will.')
return parser
def _add_network_size_args(parser):
group = parser.add_argument_group(title='network size')
group.add_argument('--num-layers', type=int, default=None,
help='Number of transformer layers.')
group.add_argument('--hidden-size', type=int, default=None,
help='Transformer hidden size.')
group.add_argument('--ffn-hidden-size', type=int, default=None,
help='Transformer Feed-Forward Network hidden size. '
'This is set to 4*hidden-size if not provided')
group.add_argument('--num-attention-heads', type=int, default=None,
help='Number of transformer attention heads.')
group.add_argument('--kv-channels', type=int, default=None,
help='Projection weights dimension in multi-head '
'attention. This is set to '
' args.hidden_size // args.num_attention_heads '
'if not provided.')
group.add_argument('--max-position-embeddings', type=int, default=None,
help='Maximum number of position embeddings to use. '
'This is the size of position embedding.')
group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
help='Pad the vocab size to be divisible by this value. '
'This is added for computational efficiency reasons.')
group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
help='Layer norm epsilon.')
group.add_argument('--apply-residual-connection-post-layernorm',
action='store_true',
help='If set, use original BERT residual connection '
'ordering.')
group.add_argument('--openai-gelu', action='store_true',
help='Use OpenAIs GeLU implementation. This option '
'should not be used unless for backward compatibility '
'reasons.')
group.add_argument('--onnx-safe', type=bool, required=False,
help='Use workarounds for known problems with '
'Torch ONNX exporter')
group.add_argument('--bert-no-binary-head', action='store_false',
help='Disable BERT binary head.',
dest='bert_binary_head')
group.add_argument('--num-experts', type=int, default=None,
help='Number of Experts in Switch Transformer (None means no Switch)')
return parser
def _add_logging_args(parser):
group = parser.add_argument_group(title='logging')
group.add_argument('--log-params-norm', action='store_true',
help='If set, calculate and log parameters norm.')
group.add_argument('--log-num-zeros-in-grad', action='store_true',
help='If set, calculate and log the number of zeros in gradient.')
group.add_argument('--tensorboard-log-interval', type=int, default=1,
help='Report to tensorboard interval.')
group.add_argument('--tensorboard-queue-size', type=int, default=1000,
help='Size of the tensorboard queue for pending events '
'and summaries before one of the ‘add’ calls forces a '
'flush to disk.')
group.add_argument('--log-timers-to-tensorboard', action='store_true',
help='If set, write timers to tensorboard.')
group.add_argument('--log-batch-size-to-tensorboard', action='store_true',
help='If set, write batch-size to tensorboard.')
group.add_argument('--no-log-learnig-rate-to-tensorboard',
action='store_false',
help='Disable learning rate logging to tensorboard.',
dest='log_learning_rate_to_tensorboard')
group.add_argument('--no-log-loss-scale-to-tensorboard',
action='store_false',
help='Disable loss-scale logging to tensorboard.',
dest='log_loss_scale_to_tensorboard')
group.add_argument('--log-validation-ppl-to-tensorboard',
action='store_true',
help='If set, write validation perplexity to '
'tensorboard.')
group.add_argument('--log-memory-to-tensorboard',
action='store_true',
help='Enable memory logging to tensorboard.')
group.add_argument('--log-world-size-to-tensorboard',
action='store_true',
help='Enable world size logging to tensorboard.')
return parser
def _add_regularization_args(parser):
group = parser.add_argument_group(title='regularization')
group.add_argument('--attention-dropout', type=float, default=0.1,
help='Post attention dropout probability.')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='Dropout probability for hidden state transformer.')
group.add_argument('--weight-decay', type=float, default=0.01,
help='Weight decay coefficient for L2 regularization.')
group.add_argument('--start-weight-decay', type=float,
help='Initial weight decay coefficient for L2 regularization.')
group.add_argument('--end-weight-decay', type=float,
help='End of run weight decay coefficient for L2 regularization.')
group.add_argument('--weight-decay-incr-style', type=str, default='constant',
choices=['constant', 'linear', 'cosine'],
help='Weight decay increment function.')
group.add_argument('--clip-grad', type=float, default=1.0,
help='Gradient clipping based on global L2 norm.')
group.add_argument('--adam-beta1', type=float, default=0.9,
help='First coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-beta2', type=float, default=0.999,
help='Second coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-eps', type=float, default=1e-08,
help='Term added to the denominator to improve '
'numerical stability')
group.add_argument('--sgd-momentum', type=float, default=0.9,
help='Momentum factor for sgd')
return parser
def _add_training_args(parser):
group = parser.add_argument_group(title='training')
group.add_argument('--micro-batch-size', type=int, default=None,
help='Batch size per model instance (local batch size). '
'Global batch size is local batch size times data '
'parallel size times number of micro batches.')
group.add_argument('--batch-size', type=int, default=None,
help='Old batch size parameter, do not use. '
'Use --micro-batch-size instead')
group.add_argument('--global-batch-size', type=int, default=None,
help='Training batch size. If set, it should be a '
'multiple of micro-batch-size times data-parallel-size. '
'If this value is None, then '
'use micro-batch-size * data-parallel-size as the '
'global batch size. This choice will result in 1 for '
'number of micro-batches.')
group.add_argument('--rampup-batch-size', nargs='*', default=None,
help='Batch size ramp up with the following values: '
' --rampup-batch-size <start batch size> '
' <batch size increment> '
' <ramp-up samples> '
'For example: '
' --rampup-batch-size 16 8 300000 \ '
' --global-batch-size 1024 '
'will start with global batch size 16 and over '
' (1024 - 16) / 8 = 126 intervals will increase '
'the batch size linearly to 1024. In each interval '
'we will use approximately 300000 / 126 = 2380 samples.')
group.add_argument('--recompute-activations', action='store_true',
help='recompute activation to allow for training '
'with larger models, sequences, and batch sizes.')
group.add_argument('--recompute-granularity', type=str, default=None,
choices=['full', 'selective'],
help='Checkpoint activations to allow for training '
'with larger models, sequences, and batch sizes. '
'It is supported at two granularities 1) full: '
'whole transformer layer is recomputed, '
'2) selective: core attention part of the transformer '
'layer is recomputed.')
group.add_argument('--distribute-saved-activations',
action='store_true',
help='If set, distribute recomputed activations '
'across model parallel group.')
group.add_argument('--recompute-method', type=str, default=None,
choices=['uniform', 'block'],
help='1) uniform: uniformly divide the total number of '
'Transformer layers and recompute the input activation of '
'each divided chunk at specified granularity, '
'2) recompute the input activations of only a set number of '
'individual Transformer layers per pipeline stage and do the '
'rest without any recomputing at specified granularity, '
'default) do not apply activations recompute to any layers')
group.add_argument('--recompute-num-layers', type=int, default=1,
help='1) uniform: the number of Transformer layers in each '
'uniformly divided recompute unit, '
'2) block: the number of individual Transformer layers '
'to recompute within each pipeline stage.')
# deprecated
group.add_argument('--checkpoint-activations', action='store_true',
help='Checkpoint activation to allow for training '
'with larger models, sequences, and batch sizes.')
group.add_argument('--train-iters', type=int, default=None,
help='Total number of iterations to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--train-samples', type=int, default=None,
help='Total number of samples to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--log-interval', type=int, default=100,
help='Report loss and timing interval.')
group.add_argument('--exit-interval', type=int, default=None,
help='Exit the program after the iteration is divisible '
'by this value.')
group.add_argument('--exit-duration-in-mins', type=int, default=None,
help='Exit the program after this many minutes.')
group.add_argument('--tensorboard-dir', type=str, default=None,
help='Write TensorBoard logs to this directory.')
group.add_argument('--no-masked-softmax-fusion',
action='store_false',
help='Disable fusion of query_key_value scaling, '
'masking, and softmax.',
dest='masked_softmax_fusion')
group.add_argument('--no-bias-gelu-fusion', action='store_false',
help='Disable bias and gelu fusion.',
dest='bias_gelu_fusion')
group.add_argument('--no-bias-dropout-fusion', action='store_false',
help='Disable bias and dropout fusion.',
dest='bias_dropout_fusion')
group.add_argument('--optimizer', type=str, default='adam',
choices=['adam', 'sgd'],
help='Optimizer function')
group.add_argument('--dataloader-type', type=str, default=None,
choices=['single', 'cyclic'],
help='Single pass vs multiple pass data loader')
group.add_argument('--no-async-tensor-model-parallel-allreduce',
action='store_true',
help='Disable asynchronous execution of '
'tensor-model-parallel all-reduce with weight '
'gradient compuation of a column-linear layer.',
dest='async_tensor_model_parallel_allreduce')
group.add_argument('--no-persist-layer-norm', action='store_true',
help='Disable using persistent fused layer norm kernel. '
'This kernel supports only a set of hidden sizes. Please '
'check persist_ln_hidden_sizes if your hidden '
'size is supported.')
group.add_argument('--sequence-parallel', action='store_true',
help='Enable sequence parallel optimization.')
group.add_argument('--no-gradient-accumulation-fusion',
action='store_false',
help='Disable fusing gradient accumulation to weight '
'gradient computation of linear layers',
dest='gradient_accumulation_fusion')
return parser
def _add_initialization_args(parser):
group = parser.add_argument_group(title='initialization')
group.add_argument('--seed', type=int, default=1234,
help='Random seed used for python, numpy, '
'pytorch, and cuda.')
group.add_argument('--init-method-std', type=float, default=0.02,
help='Standard deviation of the zero mean normal '
'distribution used for weight initialization.')
group.add_argument('--init-method-xavier-uniform', action='store_true',
help='Enable Xavier uniform parameter initialization')
return parser
def _add_learning_rate_args(parser):
group = parser.add_argument_group(title='learning rate')
group.add_argument('--lr', type=float, default=None,
help='Initial learning rate. Depending on decay style '
'and initial warmup, the learning rate at each '
'iteration would be different.')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine'],
help='Learning rate decay function.')
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay learning rate over,'
' If None defaults to `--train-iters`')
group.add_argument('--lr-decay-samples', type=int, default=None,
help='number of samples to decay learning rate over,'
' If None defaults to `--train-samples`')
group.add_argument('--lr-warmup-fraction', type=float, default=None,
help='fraction of lr-warmup-(iters/samples) to use '
'for warmup (as a float)')
group.add_argument('--lr-warmup-iters', type=int, default=0,
help='number of iterations to linearly warmup '
'learning rate over.')
group.add_argument('--lr-warmup-samples', type=int, default=0,
help='number of samples to linearly warmup '
'learning rate over.')
group.add_argument('--warmup', type=int, default=None,
help='Old lr warmup argument, do not use. Use one of the '
'--lr-warmup-* arguments above')
group.add_argument('--min-lr', type=float, default=0.0,
help='Minimum value for learning rate. The scheduler '
'clips values below this threshold.')
group.add_argument('--override-lr-scheduler', action='store_true',
help='Reset the values of the scheduler (learning rate, '
'warmup iterations, minimum learning rate, maximum '
'number of iterations, and decay style) from input '
'arguments and ignore values from checkpoints. Note '
'that all the above values will be reset.')
group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
help='Use checkpoint to set the values of the scheduler '
'(learning rate, warmup iterations, minimum learning '
'rate, maximum number of iterations, and decay style) '
'from checkpoint and ignore input arguments.')
return parser
def _add_checkpointing_args(parser):
group = parser.add_argument_group(title='checkpointing')
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=None,
help='Number of iterations between checkpoint saves.')
group.add_argument('--no-save-optim', action='store_true', default=None,
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true', default=None,
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true', default=None,
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true', default=None,
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
return parser
def _add_mixed_precision_args(parser):
group = parser.add_argument_group(title='mixed precision')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode.')
group.add_argument('--bf16', action='store_true',
help='Run model in bfloat16 mode.')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
'values can improve fp16 convergence. If None, dynamic '
'loss scaling is used.')
group.add_argument('--initial-loss-scale', type=float, default=2**32,
help='Initial loss-scale for dynamic loss scaling.')
group.add_argument('--min-loss-scale', type=float, default=1.0,
help='Minimum loss scale for dynamic loss scale.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale.')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--fp32-residual-connection', action='store_true',
help='Move residual connections to fp32.')
group.add_argument('--no-query-key-layer-scaling', action='store_false',
help='Do not scale Q * K^T by 1 / layer-number.',
dest='apply_query_key_layer_scaling')
group.add_argument('--attention-softmax-in-fp32', action='store_true',
help='Run attention masking and softmax in fp32. '
'This flag is ignored unless '
'--no-query-key-layer-scaling is specified.')
group.add_argument('--accumulate-allreduce-grads-in-fp32',
action='store_true',
help='Gradient accumulation and all-reduce in fp32.')
group.add_argument('--fp16-lm-cross-entropy', action='store_true',
help='Move the cross entropy unreduced loss calculation '
'for lm head to fp16.')
return parser
def _add_distributed_args(parser):
group = parser.add_argument_group(title='distributed')
group.add_argument('--tensor-model-parallel-size', type=int, default=1,
help='Degree of tensor model parallelism.')
group.add_argument('--pipeline-model-parallel-size', type=int, default=1,
help='Degree of pipeline model parallelism.')
group.add_argument('--pipeline-model-parallel-split-rank',
type=int, default=None,
help='Rank where encoder and decoder should be split.')
group.add_argument('--model-parallel-size', type=int, default=None,
help='Old model parallel argument, do not use. Use '
'--tensor-model-parallel-size instead.')
group.add_argument('--num-layers-per-virtual-pipeline-stage', type=int, default=None,
help='Number of layers per virtual pipeline stage')
group.add_argument('--distributed-backend', default='nccl',
choices=['nccl', 'gloo'],
help='Which backend to use for distributed training.')
group.add_argument('--DDP-impl', default='local',
choices=['local', 'torch'],
help='which DistributedDataParallel implementation '
'to use.')
group.add_argument('--no-contiguous-buffers-in-local-ddp',
action='store_false', help='If set, do not use '
'contiguous buffer in local DDP.',
dest='use_contiguous_buffers_in_local_ddp')
group.add_argument('--no-scatter-gather-tensors-in-pipeline', action='store_false',
help='If set, do not use scatter/gather to optimize communication of tensors in pipeline',
dest='scatter_gather_tensors_in_pipeline')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher.')
group.add_argument('--lazy-mpu-init', type=bool, required=False,
help='If set to True, initialize_megatron() '
'skips DDP initialization and returns function to '
'complete it instead. Also turns on '
'--use-cpu-initialization flag. This is for '
'external DDP manager.' )
group.add_argument('--use-cpu-initialization', action='store_true',
default=None, help='If set, affine parallel weights '
'initialization uses CPU' )
group.add_argument('--empty-unused-memory-level', default=0, type=int,
choices=[0, 1, 2],
help='Call torch.cuda.empty_cache() each iteration '
'(training and eval), to reduce fragmentation. '
'0=off, 1=moderate, 2=aggressive.')
group.add_argument('--standalone-embedding-stage', action='store_true',
default=False, help='If set, *input* embedding layer '
'is placed on its own pipeline stage, without any '
'transformer layers. (For T5, this flag currently only '
'affects the encoder embedding.)')
return parser
def _add_validation_args(parser):
group = parser.add_argument_group(title='validation')
group.add_argument('--eval-iters', type=int, default=100,
help='Number of iterations to run for evaluation on '
'the validation/test set.')
group.add_argument('--eval-interval', type=int, default=1000,
help='Interval between running evaluation on '
'validation set.')
return parser
def _add_data_args(parser):
group = parser.add_argument_group(title='data and dataloader')
group.add_argument('--data-path', nargs='*', default=None,
help='Path to the training dataset. Accepted format: '
'1) a single data path, 2) multiple datasets in the '
'form: dataset1-weight dataset1-path dataset2-weight '
'dataset2-path ...')
group.add_argument('--split', type=str, default='969, 30, 1',
help='Comma-separated list of proportions for training,'
' validation, and test split. For example the split '
'`90,5,5` will use 90%% of data for training, 5%% for '
'validation and 5%% for test.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file.')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file.')
group.add_argument('--vocab-extra-ids', type=int, default=0,
help='Number of additional vocabulary tokens. '
'They are used for span masking in the T5 model')
group.add_argument('--seq-length', type=int, default=None,
help='Maximum sequence length to process.')
group.add_argument('--encoder-seq-length', type=int, default=None,
help='Maximum encoder sequence length to process. '
'This should be exclusive of --seq-length')
group.add_argument('--decoder-seq-length', type=int, default=None,
help="Maximum decoder sequence length to process.")
group.add_argument('--retriever-seq-length', type=int, default=256,
help='Maximum sequence length for the biencoder model '
' for retriever')
group.add_argument('--sample-rate', type=float, default=1.0,
help='sample rate for training data. Supposed to be 0 '
' < sample_rate < 1')
group.add_argument('--mask-prob', type=float, default=0.15,
help='Probability of replacing a token with mask.')
group.add_argument('--short-seq-prob', type=float, default=0.1,
help='Probability of producing a short sequence.')
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--num-workers', type=int, default=2,
help="Dataloader number of workers.")
group.add_argument('--tokenizer-type', type=str,
default=None,
choices=['BertWordPieceLowerCase',
'BertWordPieceCase',
'GPT2BPETokenizer'],
help='What type of tokenizer to use.')
group.add_argument('--data-impl', type=str, default='infer',
choices=['lazy', 'cached', 'mmap', 'infer'],
help='Implementation of indexed datasets.')
group.add_argument('--reset-position-ids', action='store_true',
help='Reset position ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
help='Reset self attention mask after '
'end-of-document token.')
group.add_argument('--eod-mask-loss', action='store_true',
help='Mask loss for the end of document tokens.')
return parser
def _add_autoresume_args(parser):
group = parser.add_argument_group(title='autoresume')
group.add_argument('--adlr-autoresume', action='store_true',
help='Enable autoresume on adlr cluster.')
group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
help='Intervals over which to check for the autoresume '
'termination signal')
return parser
def _add_biencoder_args(parser):
group = parser.add_argument_group(title='biencoder')
# network size
group.add_argument('--ict-head-size', type=int, default=None,
help='Size of block embeddings to be used in ICT and '
'REALM (paper default: 128)')
group.add_argument('--biencoder-projection-dim', type=int, default=0,
help='Size of projection head used in biencoder (paper'
' default: 128)')
group.add_argument('--biencoder-shared-query-context-model', action='store_true',
help='Whether to share the parameters of the query '
'and context models or not')
# checkpointing
group.add_argument('--ict-load', type=str, default=None,
help='Directory containing an ICTBertModel checkpoint')
group.add_argument('--bert-load', type=str, default=None,
help='Directory containing a BertModel checkpoint '
'(needed to start ICT and REALM)')
# data
group.add_argument('--titles-data-path', type=str, default=None,
help='Path to titles dataset used for ICT')
group.add_argument('--query-in-block-prob', type=float, default=0.1,
help='Probability of keeping query in block for '
'ICT dataset')
group.add_argument('--use-one-sent-docs', action='store_true',
help='Whether to use one sentence documents in ICT')
group.add_argument('--evidence-data-path', type=str, default=None,
help='Path to Wikipedia Evidence from DPR paper')
# training
group.add_argument('--retriever-report-topk-accuracies', nargs='+', type=int,
default=[], help="Which top-k accuracies to report "
"(e.g. '1 5 20')")
group.add_argument('--retriever-score-scaling', action='store_true',
help='Whether to scale retriever scores by inverse '
'square root of hidden size')
# faiss index
group.add_argument('--block-data-path', type=str, default=None,
help='Where to save/load BlockData to/from')
group.add_argument('--embedding-path', type=str, default=None,
help='Where to save/load Open-Retrieval Embedding'
' data to/from')
# indexer
group.add_argument('--indexer-batch-size', type=int, default=128,
help='How large of batches to use when doing indexing '
'jobs')
group.add_argument('--indexer-log-interval', type=int, default=1000,
help='After how many batches should the indexer '
'report progress')
return parser
def _add_vision_args(parser):
group = parser.add_argument_group(title="vision")
# general vision arguments
group.add_argument('--num-classes', type=int, default=1000,
help='Number of classes in the vision classification task')
group.add_argument('--img-h', type=int, default=224,
help='Image height for vision classification task')
group.add_argument('--img-w', type=int, default=224,
help='Image width for vision classification task')
group.add_argument('--num-channels', type=int, default=3,
help='Number of channels in input image data')
group.add_argument('--patch-dim', type=int, default=16,
help='patch dimension')
group.add_argument('--classes-fraction', type=float, default=1.0,
help='training with fraction of classes.')
group.add_argument('--data-per-class-fraction', type=float, default=1.0,
help='training with fraction of data per class.')
group.add_argument('--no-data-sharding', action='store_false',
help='Disable data sharding.',
dest='data_sharding')
group.add_argument('--head-lr-mult', type=float, default=1.0,
help='learning rate multiplier for head during finetuning')
# pretraining type and backbone selection
group.add_argument('--vision-pretraining', action='store_true',
help='flag to indicate vision pretraining')
group.add_argument('--vision-pretraining-type', type=str, default='classify',
choices=['classify', 'inpaint', 'dino'],
help='pretraining objectives')
group.add_argument('--vision-backbone-type', type=str, default='vit',
choices=['vit', 'mit', 'swin'],
help='backbone type')
group.add_argument('--swin-backbone-type', type=str, default='tiny',
choices=['tiny', 'base', 'h3'],
help='swin backbone size variant')
# inpainting arguments
group.add_argument('--mask-type', type=str, default='random',
choices=['random', 'row'],
help='mask types')
group.add_argument('--mask-factor', type=float, default=1.0,
help='mask size scaling parameter')
# dino arguments
group.add_argument('--iter-per-epoch', type=int, default=1250,
help='iterations per epoch')
group.add_argument('--dino-local-img-size', type=int, default=96,
help='Image size for vision classification task')
group.add_argument('--dino-local-crops-number', type=int, default=10,
help='Number of local crops')
group.add_argument('--dino-head-hidden-size', type=int, default=2048,
help='Hidden dimension size in dino head')
group.add_argument('--dino-bottleneck-size', type=int, default=256,
help='Bottleneck dimension in dino head')
group.add_argument('--dino-freeze-last-layer', type=float, default=1,
help='Freezing last layer weights')
group.add_argument('--dino-norm-last-layer', action='store_true',
help='Disable Norm in last layer.')
group.add_argument('--dino-warmup-teacher-temp', type=float, default=0.04,
help='warmup teacher temperature')
group.add_argument('--dino-teacher-temp', type=float, default=0.07,
help='teacher temperature')
group.add_argument('--dino-warmup-teacher-temp-epochs', type=int, default=30,
help='warmup teacher temperature epochs')
return parser
| GeneSplice-main | GeneSplice/apex/apex/transformer/testing/arguments.py |
| GeneSplice-main | GeneSplice/apex/apex/transformer/testing/__init__.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import datetime
import os
import random
from typing import Optional, Union, List, Tuple, Callable, Dict
import numpy
import torch
import torch.nn as nn
from apex import transformer
from apex.transformer.tensor_parallel import (
ColumnParallelLinear,
RowParallelLinear,
scatter_to_sequence_parallel_region,
)
from apex.transformer.pipeline_parallel.utils import (
average_losses_across_data_parallel_group,
)
from apex.transformer.pipeline_parallel.schedules.common import (
Batch,
)
from apex.transformer.testing import global_vars
from apex.transformer._ucc_util import HAS_UCC
TEST_SUCCESS_MESSAGE = ">> passed the test :-)"
# note (mkozuki): `pre_process` and `post_process` are a placeholder until interleaving schedule test comes.
class MyLayer(nn.Module):
def __init__(self, hidden_size: int, pre_process: bool, post_process: bool):
super().__init__()
self.pre_process = pre_process
self.post_process = post_process
self.layer = nn.Linear(hidden_size, hidden_size)
def forward(self, x):
return self.layer(x)
class MyModel(nn.Module):
def __init__(
self,
hidden_size: int, pre_process: bool = False, post_process: bool = False,
*,
add_encoder: bool = False, add_decoder: bool = False,
) -> None:
super().__init__()
self.pre_process = pre_process
self.post_process = post_process
self.layer = MyLayer(
hidden_size=hidden_size, pre_process=pre_process, post_process=post_process
)
self.input_tensor = None
def set_input_tensor(
self, input_tensor: Union[torch.Tensor, List[torch.Tensor]]
) -> None:
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
self.input_tensor = input_tensor[0]
def forward(self, x: Optional[torch.Tensor]) -> torch.Tensor:
if self.input_tensor is None:
return self.layer(x)
return self.layer(self.input_tensor)
class ToyParallelMLP(nn.Module):
def __init__(
self,
hidden_size: int, pre_process: bool = False, post_process: bool = False,
*,
sequence_parallel_enabled: bool = False,
# TODO(mkozuki): Support these two?
add_encoder: bool = False, add_decoder: bool = False,
) -> None:
super().__init__()
self.pre_process = pre_process
self.post_process = post_process
self.sequence_parallel_enabled = sequence_parallel_enabled
ffn_hidden_size = 4 * hidden_size
self.dense_h_to_4h = ColumnParallelLinear(
hidden_size,
ffn_hidden_size,
gather_output=False,
# init_method=init_method,
skip_bias_add=True,
# use_cpu_initialization=use_cpu_initialization,
bias=True,
sequence_parallel_enabled=sequence_parallel_enabled,
no_async_tensor_model_parallel_allreduce=True,
)
self.dense_4h_to_h = RowParallelLinear(
ffn_hidden_size,
hidden_size,
input_is_parallel=True,
# init_method=output_layer_init_method,
skip_bias_add=False,
# use_cpu_initialization=use_cpu_initialization,
bias=True,
sequence_parallel_enabled=sequence_parallel_enabled,
)
self.activation_func = torch.nn.GELU()
def set_input_tensor(
self,
input_tensor: Union[torch.Tensor, List[torch.Tensor]],
) -> None:
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
self.input_tensor = input_tensor[0]
def forward(
self,
x: Optional[torch.Tensor],
) -> torch.Tensor:
"""Forward of Simplified ParallelMLP.
Args:
x: :obj:`None` if pipeline rank != pipeline first rank. When :obj:`None`,
`self.input_tensor` is taken care of by `forward_step` defined in
apex/transformer/pipeline_parallel/schedules/common.py
"""
# [s, b, h]
if self.input_tensor is None:
input = x
else:
input = self.input_tensor
intermediate_parallel, bias_parallel = self.dense_h_to_4h(input)
if bias_parallel is not None:
intermediate_parallel += bias_parallel
intermediate_parallel = self.activation_func(intermediate_parallel)
# [s, b, h]
output, output_bias = self.dense_4h_to_h(intermediate_parallel)
return output
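# Shape sketch (illustrative): without sequence parallelism the activation stays
# [s, b, h] while the 4h dimension is sharded across tensor-parallel ranks; with
# sequence_parallel_enabled the input arrives as [s/tp, b, h], dense_h_to_4h
# all-gathers along s before its sharded matmul, and dense_4h_to_h reduce-scatters
# the result back to [s/tp, b, h].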
def model_provider_func(
hidden_size: int,
pre_process: bool,
post_process: bool,
*,
add_encoder: bool = False,
add_decoder: bool = False) -> MyModel:
return MyModel(hidden_size, pre_process, post_process, add_encoder=add_encoder, add_decoder=add_decoder)
def mlp_provider_func(
hidden_size: int,
pre_process: bool,
post_process: bool,
*,
add_encoder: bool = False,
add_decoder: bool = False,
sequence_parallel_enabled: bool = False,
) -> ToyParallelMLP:
return ToyParallelMLP(
hidden_size,
pre_process,
post_process,
add_encoder=add_encoder,
add_decoder=add_decoder,
sequence_parallel_enabled=sequence_parallel_enabled,
)
def process_batch(batch):
if isinstance(batch, list):
x = batch[0]
else:
x = batch
return x
def fwd_step_func(batch, model):
x = process_batch(batch)
y = model(x)
# note (mkozuki): I don't think this function is nice but I do think this is enough for now
# just to check the sanity of ported pipeline functions.
def loss_func(x):
loss = torch.sum(x)
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {"avg": averaged_loss}
return y, loss_func
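# Usage sketch (illustrative): the pipeline schedules call this as
# `output, loss_func = fwd_step_func(batch, model)` once per microbatch and apply
# `loss_func` to the output on the last pipeline stage to get the (loss, metrics)
# pair that is reduced across data-parallel ranks.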
@dataclass(frozen=True)
class ToyParallelMLPFwdBwdStepFunc:
sequence_parallel_enabled: bool
def __call__(
self,
batch: Batch,
model: torch.nn.Module,
) -> Tuple[torch.Tensor, Callable[[torch.Tensor], Tuple[torch.Tensor, Dict[str, torch.Tensor]]]]:
x = batch[0] if isinstance(batch, list) else batch
if isinstance(x, torch.Tensor):
x = x.transpose(0, 1).contiguous()
if self.sequence_parallel_enabled:
x = scatter_to_sequence_parallel_region(x)
y = model(x)
# note (mkozuki): I don't think this function is nice but I do think this is enough for now
# just to check the sanity of ported pipeline functions.
def loss_func(x):
loss = torch.sum(x)
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {"avg": averaged_loss}
return y, loss_func
class IdentityLayer(torch.nn.Module):
def __init__(self, size, scale=1.0):
super(IdentityLayer, self).__init__()
self.weight = torch.nn.Parameter(scale * torch.randn(size))
def forward(self):
return self.weight
def set_random_seed(seed):
"""Set random seed for reproducibility."""
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
transformer.tensor_parallel.model_parallel_cuda_manual_seed(seed)
def initialize_distributed(backend="nccl"):
"""Initialize torch.distributed."""
# Get local rank in case it is provided.
# parser = argparse.ArgumentParser()
# parser.add_argument('--local_rank', type=int, default=None,
# help='local rank passed from distributed launcher')
# args = parser.parse_args()
if backend not in ("nccl", "ucc"):
raise RuntimeError(f"Currently only nccl & ucc are supported but {backend}")
if backend == "ucc":
if not HAS_UCC:
raise ImportError("UCC backend requires pytorch source build with UCC installed and enabled")
args = global_vars.get_args()
local_rank = args.local_rank
# Get rank and world size.
rank = int(os.getenv("RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "1"))
print(
"> initializing torch.distributed with local rank: {}, "
"rank: {}, world size: {}".format(local_rank, rank, world_size)
)
# Set the device id.
device = rank % torch.cuda.device_count()
if local_rank is not None:
device = local_rank
torch.cuda.set_device(device)
# Call the init process.
init_method = "tcp://"
master_ip = os.getenv("MASTER_ADDR", "localhost")
master_port = os.getenv("MASTER_PORT", "6000")
init_method += master_ip + ":" + master_port
torch.distributed.init_process_group(
backend=backend, world_size=world_size, rank=rank, init_method=init_method,
timeout=datetime.timedelta(seconds=60),
)
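# Usage sketch (illustrative): this helper expects the launcher environment
# (RANK, WORLD_SIZE, MASTER_ADDR, MASTER_PORT) to be set and the test args to be
# registered in `global_vars` so that `get_args().local_rank` is available, e.g.
#
#   initialize_distributed(backend="nccl")  # or "ucc" when built with UCC support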
def print_separator(message):
filler_len = (78 - len(message)) // 2
filler = "-" * filler_len
string = "\n" + filler + " {} ".format(message) + filler
if torch.distributed.get_rank() == 0:
print(string, flush=True)
| GeneSplice-main | GeneSplice/apex/apex/transformer/testing/commons.py |
import contextlib
import torch
from apex.transformer import tensor_parallel
from apex.transformer.enums import AttnMaskType
from apex.transformer.enums import ModelType
from apex.transformer.layers import FusedLayerNorm as LayerNorm
from apex.transformer.testing.global_vars import get_args
from apex.transformer.testing.standalone_transformer_lm import (
MegatronModule,
get_language_model,
get_linear_layer,
init_method_normal,
scaled_init_method_normal,
parallel_lm_logits,
)
def bert_extended_attention_mask(attention_mask):
# We create a 3D attention mask from a 2D tensor mask.
# [b, 1, s]
attention_mask_b1s = attention_mask.unsqueeze(1)
# [b, s, 1]
attention_mask_bs1 = attention_mask.unsqueeze(2)
# [b, s, s]
attention_mask_bss = attention_mask_b1s * attention_mask_bs1
# [b, 1, s, s]
extended_attention_mask = attention_mask_bss.unsqueeze(1)
# Convert attention mask to binary:
extended_attention_mask = (extended_attention_mask < 0.5)
return extended_attention_mask
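# Shape sketch (illustrative): for a padding mask of shape [b, s] with 1 = keep,
# the outer product above yields [b, s, s], unsqueezed to [b, 1, s, s]; the final
# `< 0.5` comparison turns it into a boolean mask that is True at positions the
# attention scores should mask out.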
def bert_position_ids(token_ids):
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long,
device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
class BertLMHead(MegatronModule):
"""Masked LM head for Bert
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
hidden_size: hidden size
init_method: init method for weight initialization
layernorm_epsilon: tolerance for layer norm divisions
parallel_output: whether output logits being distributed or not.
"""
def __init__(self, mpu_vocab_size, hidden_size, init_method,
layernorm_epsilon, parallel_output):
super(BertLMHead, self).__init__()
args = get_args()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
# TODO: do we need this?
# mpu.set_tensor_model_parallel_attributes(self.bias, True, 0, 1)
self.parallel_output = parallel_output
self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
setattr(self.dense.weight, 'sequence_parallel', args.sequence_parallel)
setattr(self.dense.bias, 'sequence_parallel', args.sequence_parallel)
self.layernorm = LayerNorm(
hidden_size, eps=layernorm_epsilon, sequence_parallel_enabled=args.sequence_parallel)
self.gelu = torch.nn.functional.gelu
if args.openai_gelu:
self.gelu = openai_gelu
elif args.onnx_safe:
self.gelu = erf_gelu
def forward(self, hidden_states, word_embeddings_weight):
hidden_states = self.dense(hidden_states)
hidden_states = self.gelu(hidden_states)
hidden_states = self.layernorm(hidden_states)
output = parallel_lm_logits(hidden_states,
word_embeddings_weight,
self.parallel_output,
bias=self.bias)
return output
def post_language_model_processing(lm_output, pooled_output,
lm_head, binary_head,
lm_labels,
logit_weights,
fp16_lm_cross_entropy):
# Output.
lm_logits = lm_head(
lm_output, logit_weights)
binary_logits = None
if binary_head is not None:
binary_logits = binary_head(pooled_output)
if lm_labels is None:
# [s b h] => [b s h]
return lm_logits.transpose(0, 1).contiguous(), binary_logits
else:
# [b s] => [s b]
lm_labels = lm_labels.transpose(0, 1).contiguous()
# lm_logits: [s b h] lm_labels: [s b]
if fp16_lm_cross_entropy:
assert lm_logits.dtype == torch.half
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels)
else:
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(),
lm_labels)
return lm_loss, binary_logits
class BertModel(MegatronModule):
"""Bert Language model."""
def __init__(self,
num_tokentypes=2,
add_binary_head=True,
parallel_output=True,
pre_process=True,
post_process=True,
cpu_offload=False):
super(BertModel, self).__init__()
args = get_args()
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.add_binary_head = add_binary_head
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
init_method = init_method_normal(args.init_method_std)
scaled_init_method = scaled_init_method_normal(args.init_method_std,
args.num_layers)
self.language_model, self._language_model_key = get_language_model(
num_tokentypes=num_tokentypes,
add_pooler=self.add_binary_head,
encoder_attn_mask_type=AttnMaskType.padding,
init_method=init_method,
scaled_init_method=scaled_init_method,
pre_process=self.pre_process,
post_process=self.post_process)
self.initialize_word_embeddings(init_method_normal)
if self.post_process:
self.lm_head = BertLMHead(
self.word_embeddings_weight().size(0),
args.hidden_size, init_method, args.layernorm_epsilon, parallel_output)
self._lm_head_key = 'lm_head'
self.binary_head = None
if self.add_binary_head:
self.binary_head = get_linear_layer(args.hidden_size, 2,
init_method)
self._binary_head_key = 'binary_head'
self.forward_context = contextlib.nullcontext
if cpu_offload:
self.forward_context = torch.autograd.graph.save_on_cpu
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, bert_model_input, attention_mask,
tokentype_ids=None, lm_labels=None):
with self.forward_context():
extended_attention_mask = bert_extended_attention_mask(attention_mask)
input_ids = bert_model_input
position_ids = bert_position_ids(input_ids)
lm_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids
)
if self.post_process and self.add_binary_head:
lm_output, pooled_output = lm_output
else:
pooled_output = None
if self.post_process:
return post_language_model_processing(lm_output, pooled_output,
self.lm_head, self.binary_head,
lm_labels,
self.word_embeddings_weight(),
self.fp16_lm_cross_entropy)
else:
return lm_output
# NOTE(mkozuki): This method is not maintained as apex only tests forward_backward with best effort.
def state_dict_for_save_checkpoint(self, destination=None, prefix='',
keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(
destination, prefix, keep_vars)
if self.post_process:
state_dict_[self._lm_head_key] \
= self.lm_head.state_dict_for_save_checkpoint(
destination, prefix, keep_vars)
if self.post_process and self.add_binary_head:
state_dict_[self._binary_head_key] \
= self.binary_head.state_dict(destination, prefix, keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(destination, prefix, keep_vars)
return state_dict_
# NOTE(mkozuki): This method is not maintained as apex only tests forward_backward with best effort.
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process:
self.lm_head.load_state_dict(
state_dict[self._lm_head_key], strict=strict)
if self.post_process and self.add_binary_head:
self.binary_head.load_state_dict(
state_dict[self._binary_head_key], strict=strict)
# Load word_embeddings.
if self.post_process and not self.pre_process:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict)
def bert_model_provider(pre_process=True, post_process=True, cpu_offload=False):
args = get_args()
num_tokentypes = 2 if args.bert_binary_head else 0
model = BertModel(
num_tokentypes=num_tokentypes,
add_binary_head=args.bert_binary_head,
parallel_output=True,
pre_process=pre_process,
post_process=post_process,
cpu_offload=cpu_offload,
)
return model
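# Usage sketch (illustrative; `parallel_state` is shown only for orientation and is
# not imported in this file): the pipeline tests hand this provider to a model
# builder that fills in the flags per stage, roughly
#
#   model = bert_model_provider(pre_process=parallel_state.is_pipeline_first_stage(),
#                               post_process=parallel_state.is_pipeline_last_stage())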
| GeneSplice-main | GeneSplice/apex/apex/transformer/testing/standalone_bert.py |
import os
import sys
import unittest
from packaging.version import Version, parse
import torch
from torch import distributed as dist
from torch.utils import collect_env
from torch.testing._internal import common_utils
from torch.testing._internal import common_distributed
from apex.transformer._ucc_util import HAS_UCC
# NOTE(mkozuki): Version guard for ucc. ref: https://github.com/openucx/ucc/issues/496
_TORCH_UCC_COMPAT_NVIDIA_DRIVER_VERSION = Version("470.42.01")
_driver_version = None
if torch.cuda.is_available():
_driver_version = parse(collect_env.get_nvidia_driver_version(collect_env.run))
HAS_TORCH_UCC_COMPAT_NVIDIA_DRIVER = _driver_version is not None and _driver_version >= _TORCH_UCC_COMPAT_NVIDIA_DRIVER_VERSION
class DistributedTestBase(common_distributed.MultiProcessTestCase):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def setUp(self) -> None:
super().setUp()
self._setup_pre_spawn()
self._spawn_processes()
def tearDown(self) -> None:
torch.cuda.empty_cache()
super().tearDown()
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 4)
@property
def init_method(self):
return f"{common_utils.FILE_SCHEMA}{self.file_name}"
@classmethod
def _run(cls, rank, test_name, file_name, pipe):
self = cls(test_name)
self.assertTrue(torch.cuda.is_available())
self.assertTrue(hasattr(self, "DISTRIBUTED_BACKEND"))
self.rank = rank
self.file_name = file_name
print(f"[dist init] rank = {self.rank}, world_size = {self.world_size}")
try:
dist.init_process_group(
init_method=self.init_method,
backend=self.DISTRIBUTED_BACKEND,
world_size=int(self.world_size),
rank=self.rank,
)
except RuntimeError as e:
if "recompile" in e.args[0]:
print(f"Backend of {self.DISTRIBUTED_BACKEND} not available")
sys.exit(0)
raise
torch.cuda.set_device(self.rank % torch.cuda.device_count())
dist.barrier()
self.run_test(test_name, pipe)
dist.barrier()
dist.destroy_process_group()
sys.exit(0)
def _setup_pre_spawn(self):
pass
class NcclDistributedTestBase(DistributedTestBase):
DISTRIBUTED_BACKEND = "nccl"
@unittest.skipUnless(
HAS_UCC,
"Requires either torch ucc or pytorch build from source with native ucc installed and enabled",
)
@unittest.skipUnless(
HAS_TORCH_UCC_COMPAT_NVIDIA_DRIVER,
f"`torch_ucc` requires NVIDIA driver >= {_TORCH_UCC_COMPAT_NVIDIA_DRIVER_VERSION} but {_driver_version} found. "
"See https://github.com/openucx/ucc/issues/496",
)
class UccDistributedTestBase(DistributedTestBase):
DISTRIBUTED_BACKEND = "ucc"
def _setup_pre_spawn(self) -> None:
self.master_addr = "localhost"
os.environ["MASTER_ADDR"] = "localhost"
self._has_master_port = "MASTER_PORT" in os.environ
if self._has_master_port:
self.master_port = os.environ["MASTER_PORT"]
else:
try:
from caffe2.torch.fb.common.utils import get_free_port
self.master_port = str(get_free_port())
except ImportError:
self.master_port = "12375"
os.environ["MASTER_PORT"] = self.master_port
self._has_ucx_tls = "UCX_TLS" in os.environ
if not self._has_ucx_tls:
os.environ["UCX_TLS"] = "tcp,cuda"
print('os.environ[\"UCX_TLS\"] = {}'.format(os.environ["UCX_TLS"]))
def tearDown(self) -> None:
super().tearDown()
if not self._has_master_port:
del os.environ["MASTER_PORT"]
if not self._has_ucx_tls:
del os.environ["UCX_TLS"]
@property
def init_method(self):
return "tcp://localhost:" + os.environ["MASTER_PORT"]
| GeneSplice-main | GeneSplice/apex/apex/transformer/testing/distributed_test_base.py |
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import torch
from apex.transformer.enums import AttnMaskType
from apex.transformer.enums import ModelType
from apex.transformer import tensor_parallel
from apex.transformer.testing.global_vars import get_args
from apex.transformer.testing.standalone_transformer_lm import MegatronModule
from apex.transformer.testing.standalone_transformer_lm import parallel_lm_logits
from apex.transformer.testing.standalone_transformer_lm import post_language_model_processing
from apex.transformer.testing.standalone_transformer_lm import get_language_model
from apex.transformer.testing.standalone_transformer_lm import init_method_normal
from apex.transformer.testing.standalone_transformer_lm import (
scaled_init_method_normal,
)
def gpt_model_provider(pre_process: bool = True, post_process: bool = True, cpu_offload: bool = False,) -> "GPTModel":
args = get_args()
model = GPTModel(
num_tokentypes=0,
parallel_output=True,
pre_process=pre_process,
post_process=post_process,
        cpu_offload=cpu_offload,
)
return model
class GPTModel(MegatronModule):
"""GPT-2 Language model."""
def __init__(
self,
num_tokentypes:int = 0,
parallel_output: bool = True,
pre_process: bool = True,
post_process: bool = True,
cpu_offload: bool = False,
):
super().__init__()
args = get_args()
self.forward_context = contextlib.nullcontext
if cpu_offload:
self.forward_context = torch.autograd.graph.save_on_cpu
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.language_model, self._language_model_key = get_language_model(
num_tokentypes=num_tokentypes,
add_pooler=False,
encoder_attn_mask_type=AttnMaskType.causal,
init_method=init_method_normal(args.init_method_std),
scaled_init_method=scaled_init_method_normal(
args.init_method_std, args.num_layers
),
pre_process=self.pre_process,
post_process=self.post_process,
)
self.initialize_word_embeddings(init_method_normal)
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(
self,
input_ids,
position_ids,
attention_mask,
labels=None,
tokentype_ids=None,
inference_params=None,
):
with self.forward_context():
lm_output = self.language_model(
input_ids, position_ids, attention_mask, inference_params=inference_params
)
if self.post_process:
return post_language_model_processing(
lm_output,
# note(mkozuki): Am I overlooking some order of dim change?
labels.t().contiguous(),
self.word_embeddings_weight(),
self.parallel_output,
self.fp16_lm_cross_entropy,
)
else:
return lm_output
| GeneSplice-main | GeneSplice/apex/apex/transformer/testing/standalone_gpt.py |
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT-2 model."""
import enum
import math
import contextlib
import json
import torch
import torch.nn.functional as F
import apex.transformer.utils
from apex.transformer.layers import FusedLayerNorm as LayerNorm
from apex.transformer.functional import FusedScaleMaskSoftmax
from apex.transformer import tensor_parallel
from apex.transformer.tensor_parallel.layers import ColumnParallelLinear
from apex.transformer.tensor_parallel.layers import RowParallelLinear
from apex.transformer.tensor_parallel.layers import VocabParallelEmbedding
from apex.transformer.tensor_parallel.mappings import scatter_to_sequence_parallel_region
from apex.transformer import parallel_state
from apex.transformer.testing.global_vars import get_args
from apex.transformer.enums import ModelType
from apex.transformer.enums import LayerType
from apex.transformer.enums import AttnType
from apex.transformer.enums import AttnMaskType
from apex.transformer.log_util import get_transformer_logger
_logger = get_transformer_logger(__name__)
def param_is_not_shared(param: torch.Tensor) -> bool:
return getattr(param, "shared", False)
class MegatronModule(torch.nn.Module):
"""Megatron specific extensions of torch Module with support for pipelining."""
def __init__(self, share_word_embeddings: bool = True) -> None:
super().__init__()
self.share_word_embeddings = share_word_embeddings
def word_embeddings_weight(self):
if self.pre_process:
return self.language_model.embedding.word_embeddings.weight
else:
if not self.share_word_embeddings:
raise Exception('word_embeddings_weight() called for last stage, but share_word_embeddings is false')
return self.word_embeddings.weight
def initialize_word_embeddings(self, init_method_normal):
args = get_args()
if not self.share_word_embeddings:
raise Exception("initialize_word_embeddings() was called but share_word_embeddings is false")
# This function just initializes the word embeddings in the final stage
# when we are using pipeline parallelism. Nothing to do if we aren't
# using pipeline parallelism.
if args.pipeline_model_parallel_size == 1:
return
# Parameters are shared between the word embeddings layers, and the
# heads at the end of the model. In a pipelined setup with more than
# one stage, the initial embedding layer and the head are on different
# workers, so we do the following:
# 1. Create a second copy of word_embeddings on the last stage, with
# initial parameters of 0.0.
# 2. Do an all-reduce between the first and last stage to ensure that
# the two copies of word_embeddings start off with the same
# parameter values.
        #     3. In the training loop, do an all-reduce between the grads of
        #        the two word_embeddings layers to ensure that every applied weight
        #        update is the same on both stages.
if parallel_state.is_pipeline_last_stage() and not self.pre_process:
assert not parallel_state.is_pipeline_first_stage()
self._word_embeddings_for_head_key = 'word_embeddings_for_head'
# set word_embeddings weights to 0 here, then copy first
# stage's weights using all_reduce below.
self.word_embeddings = VocabParallelEmbedding(
args.padded_vocab_size, args.hidden_size,
init_method=init_method_normal(args.init_method_std))
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
# Zero out initial weights for decoder embedding.
# NOTE: We don't currently support T5 with the interleaved schedule.
if not parallel_state.is_pipeline_first_stage(ignore_virtual=True) and self.pre_process:
self.language_model.embedding.zero_parameters()
# Ensure that first and last stages have the same initial parameter
# values.
if torch.distributed.is_initialized():
if parallel_state.is_rank_in_embedding_group():
torch.distributed.all_reduce(self.word_embeddings_weight(),
group=parallel_state.get_embedding_group())
# Ensure that encoder(first stage) and decoder(split stage) position
# embeddings have the same initial parameter values
# NOTE: We don't currently support T5 with the interleaved schedule.
if parallel_state.is_rank_in_position_embedding_group() and \
args.pipeline_model_parallel_split_rank is not None:
# TODO: Support tokentype embedding.
self.language_model.embedding.cuda()
position_embeddings = self.language_model.embedding.position_embeddings
torch.distributed.all_reduce(position_embeddings.weight,
group=parallel_state.get_position_embedding_group())
else:
print("WARNING! Distributed processes aren't initialized, so "
"word embeddings in the last layer are not initialized. "
"If you are just manipulating a model this is fine, but "
"this needs to be handled manually. If you are training "
"something is definitely wrong.")
def get_linear_layer(rows, columns, init_method):
"""Simple linear layer with weight initialization."""
layer = torch.nn.Linear(rows, columns)
init_method(layer.weight)
with torch.no_grad():
layer.bias.zero_()
return layer
# NOTE(mkozuki): Avoid inplace op.
def attention_mask_func(attention_scores: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
# attention_scores.masked_fill_(attention_mask, -10000.0)
# return attention_scores
return attention_scores.masked_fill(attention_mask, -10000.0)
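# Minimal illustration of the out-of-place masking above (shapes are assumptions):
def _example_attention_mask() -> torch.Tensor:
    scores = torch.zeros(1, 1, 2, 2)                          # [b, np, sq, sk]
    mask = torch.tensor([[[[False, True], [False, False]]]])  # True = masked out
    return attention_mask_func(scores, mask)                  # masked entries -> -10000.0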
def init_method_normal(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
def scaled_init_method_normal(sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
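# Worked example (values are illustrative, not taken from this file): for
# sigma=0.02 and num_layers=24, the scaled std is 0.02 / sqrt(2 * 24) ~= 0.00289,
# i.e. deeper stacks get proportionally smaller output-layer initializations.
def _example_scaled_std(sigma: float = 0.02, num_layers: int = 24) -> float:
    """Return the std that scaled_init_method_normal would use for these settings."""
    return sigma / math.sqrt(2.0 * num_layers)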
class ParallelMLP(MegatronModule):
"""MLP.
MLP will take the input with h hidden state, project it to 4*h
hidden dimension, perform nonlinear transformation, and project the
state back into h hidden dimension.
"""
def __init__(self, init_method, output_layer_init_method):
super().__init__()
args = get_args()
# Project to 4h.
self.dense_h_to_4h = ColumnParallelLinear(
args.hidden_size,
args.ffn_hidden_size,
gather_output=False,
init_method=init_method,
skip_bias_add=True,
no_async_tensor_model_parallel_allreduce=not args.async_tensor_model_parallel_allreduce,
sequence_parallel_enabled=args.sequence_parallel,
)
self.bias_gelu_fusion = args.bias_gelu_fusion
self.activation_func = F.gelu
# Project back to h.
self.dense_4h_to_h = RowParallelLinear(
args.ffn_hidden_size,
args.hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
sequence_parallel_enabled=args.sequence_parallel,
)
def forward(self, hidden_states):
# [s, b, 4hp]
intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)
intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel)
# [s, b, h]
output, output_bias = self.dense_4h_to_h(intermediate_parallel)
return output, output_bias
class CoreAttention(MegatronModule):
def __init__(self, layer_number, attn_mask_type=AttnMaskType.padding):
super().__init__()
args = get_args()
self.fp16 = args.fp16
self.bf16 = args.bf16
self.apply_query_key_layer_scaling = args.apply_query_key_layer_scaling
self.attention_softmax_in_fp32 = args.attention_softmax_in_fp32
if self.apply_query_key_layer_scaling:
self.attention_softmax_in_fp32 = True
self.layer_number = max(1, layer_number)
self.attn_mask_type = attn_mask_type
self.sequence_parallel = args.sequence_parallel
projection_size = args.kv_channels * args.num_attention_heads
# Per attention head and per partition values.
world_size = parallel_state.get_tensor_model_parallel_world_size()
self.hidden_size_per_partition = apex.transformer.utils.divide(
projection_size, world_size
)
self.hidden_size_per_attention_head = apex.transformer.utils.divide(
projection_size, args.num_attention_heads
)
self.num_attention_heads_per_partition = apex.transformer.utils.divide(
args.num_attention_heads, world_size
)
coeff = None
self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
if self.apply_query_key_layer_scaling:
coeff = self.layer_number
self.norm_factor *= coeff
self.scale_mask_softmax = FusedScaleMaskSoftmax(
self.fp16,
self.bf16,
self.attn_mask_type,
args.masked_softmax_fusion,
attention_mask_func,
self.attention_softmax_in_fp32,
coeff,
)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs on different number of parallel partitions but
# on average it should not be partition dependent.
self.attention_dropout = torch.nn.Dropout(args.attention_dropout)
def forward(self, query_layer, key_layer, value_layer, attention_mask):
# ===================================
# Raw attention scores. [b, np, s, s]
# ===================================
# [b, np, sq, sk]
output_size = (
query_layer.size(1),
query_layer.size(2),
query_layer.size(0),
key_layer.size(0),
)
# [sq, b, np, hn] -> [sq, b * np, hn]
query_layer = query_layer.view(
output_size[2], output_size[0] * output_size[1], -1
)
# [sk, b, np, hn] -> [sk, b * np, hn]
key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
        # preallocating input tensor: [b * np, sq, sk]
matmul_input_buffer = torch.empty(
output_size[0] * output_size[1],
output_size[2],
output_size[3],
dtype=query_layer.dtype,
device=torch.cuda.current_device(),
)
# Raw attention scores. [b * np, sq, sk]
matmul_result = torch.baddbmm(
matmul_input_buffer,
query_layer.transpose(0, 1), # [b * np, sq, hn]
key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
beta=0.0,
alpha=(1.0 / self.norm_factor),
)
# change view to [b, np, sq, sk]
attention_scores = matmul_result.view(*output_size)
# ===========================
# Attention probs and dropout
# ===========================
# attention scores and attention mask [b, np, sq, sk]
attention_probs = self.scale_mask_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
if not self.sequence_parallel:
with tensor_parallel.get_cuda_rng_tracker().fork():
attention_probs = self.attention_dropout(attention_probs)
else:
attention_probs = self.attention_dropout(attention_probs)
# =========================
# Context layer. [sq, b, hp]
# =========================
# value_layer -> context layer.
# [sk, b, np, hn] --> [b, np, sq, hn]
# context layer shape: [b, np, sq, hn]
output_size = (
value_layer.size(1),
value_layer.size(2),
query_layer.size(0),
value_layer.size(3),
)
# change view [sk, b * np, hn]
value_layer = value_layer.view(
value_layer.size(0), output_size[0] * output_size[1], -1
)
# change view [b * np, sq, sk]
attention_probs = attention_probs.view(
output_size[0] * output_size[1], output_size[2], -1
)
# matmul: [b * np, sq, hn]
context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
# change view [b, np, sq, hn]
context_layer = context_layer.view(*output_size)
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
new_context_layer_shape = context_layer.size()[:-2] + (
self.hidden_size_per_partition,
)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class ParallelAttention(MegatronModule):
"""Parallel self-attention layer abstract class.
Self-attention layer takes input with size [b, s, h]
and returns output of the same size.
"""
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
attention_type=AttnType.self_attn,
attn_mask_type=AttnMaskType.padding,
):
super().__init__()
args = get_args()
self.layer_number = max(1, layer_number)
self.attention_type = attention_type
self.attn_mask_type = attn_mask_type
self.params_dtype = args.params_dtype
projection_size = args.kv_channels * args.num_attention_heads
# Per attention head and per partition values.
world_size = parallel_state.get_tensor_model_parallel_world_size()
self.hidden_size_per_attention_head = apex.transformer.utils.divide(
projection_size, args.num_attention_heads
)
self.num_attention_heads_per_partition = apex.transformer.utils.divide(
args.num_attention_heads, world_size
)
# Strided linear layer.
if attention_type == AttnType.self_attn:
self.query_key_value = ColumnParallelLinear(
args.hidden_size,
3 * projection_size,
gather_output=False,
init_method=init_method,
no_async_tensor_model_parallel_allreduce=not args.async_tensor_model_parallel_allreduce,
sequence_parallel_enabled=args.sequence_parallel,
)
else:
assert attention_type == AttnType.cross_attn
self.query = ColumnParallelLinear(
args.hidden_size,
projection_size,
gather_output=False,
init_method=init_method,
no_async_tensor_model_parallel_allreduce=not args.async_tensor_model_parallel_allreduce,
sequence_parallel_enabled=args.sequence_parallel,
)
self.key_value = ColumnParallelLinear(
args.hidden_size,
2 * projection_size,
gather_output=False,
init_method=init_method,
no_async_tensor_model_parallel_allreduce=not args.async_tensor_model_parallel_allreduce,
sequence_parallel_enabled=args.sequence_parallel,
)
self.core_attention = CoreAttention(self.layer_number, self.attn_mask_type)
self.checkpoint_core_attention = args.recompute_granularity == "selective"
# Output.
self.dense = RowParallelLinear(
projection_size,
args.hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
sequence_parallel_enabled=args.sequence_parallel,
)
def _checkpointed_attention_forward(
self, query_layer, key_layer, value_layer, attention_mask
):
"""Forward method with activation checkpointing."""
def custom_forward(*inputs):
query_layer = inputs[0]
key_layer = inputs[1]
value_layer = inputs[2]
attention_mask = inputs[3]
output_ = self.core_attention(
query_layer, key_layer, value_layer, attention_mask
)
return output_
hidden_states = tensor_parallel.checkpoint(
custom_forward, False, query_layer, key_layer, value_layer, attention_mask
)
return hidden_states
def _allocate_memory(self, inference_max_sequence_len, batch_size):
return torch.empty(
inference_max_sequence_len,
batch_size,
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
dtype=self.params_dtype,
device=torch.cuda.current_device(),
)
def forward(
self, hidden_states, attention_mask, encoder_output=None, inference_params=None
):
# hidden_states: [sq, b, h]
# =================================================
# Pre-allocate memory for key-values for inference.
# =================================================
if inference_params:
if self.layer_number not in inference_params.key_value_memory_dict:
inf_max_seq_len = inference_params.max_sequence_len
inf_max_batch_size = inference_params.max_batch_size
inference_key_memory = self._allocate_memory(
inf_max_seq_len, inf_max_batch_size
)
inference_value_memory = self._allocate_memory(
inf_max_seq_len, inf_max_batch_size
)
inference_params.key_value_memory_dict[self.layer_number] = (
inference_key_memory,
inference_value_memory,
)
else:
(
inference_key_memory,
inference_value_memory,
) = inference_params.key_value_memory_dict[self.layer_number]
# =====================
# Query, Key, and Value
# =====================
if self.attention_type == AttnType.self_attn:
# Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
mixed_x_layer, _ = self.query_key_value(hidden_states)
# [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
new_tensor_shape = mixed_x_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
3 * self.hidden_size_per_attention_head,
)
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
# [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
(
query_layer,
key_layer,
value_layer,
) = tensor_parallel.utils.split_tensor_along_last_dim(mixed_x_layer, 3)
else:
# Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
mixed_kv_layer, _ = self.key_value(encoder_output)
# [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
new_tensor_shape = mixed_kv_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
2 * self.hidden_size_per_attention_head,
)
mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)
# [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
(
key_layer,
value_layer,
) = tensor_parallel.utils.split_tensor_along_last_dim(mixed_kv_layer, 2)
# Attention head [sq, b, h] --> [sq, b, hp]
query_layer, _ = self.query(hidden_states)
# [sq, b, hp] --> [sq, b, np, hn]
new_tensor_shape = query_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
query_layer = query_layer.view(*new_tensor_shape)
# ==================================
# Adjust key and value for inference
# ==================================
if inference_params:
batch_start = inference_params.batch_size_offset
batch_end = batch_start + key_layer.size(1)
assert batch_end <= inference_key_memory.size(1)
sequence_start = inference_params.sequence_len_offset
sequence_end = sequence_start + key_layer.size(0)
assert sequence_end <= inference_key_memory.size(0)
# Copy key and values.
inference_key_memory[
sequence_start:sequence_end, batch_start:batch_end, ...
] = key_layer
inference_value_memory[
sequence_start:sequence_end, batch_start:batch_end, ...
] = value_layer
key_layer = inference_key_memory[:sequence_end, batch_start:batch_end, ...]
value_layer = inference_value_memory[
:sequence_end, batch_start:batch_end, ...
]
# ==================================
# core attention computation
# ==================================
if self.checkpoint_core_attention:
context_layer = self._checkpointed_attention_forward(
query_layer, key_layer, value_layer, attention_mask
)
else:
context_layer = self.core_attention(
query_layer, key_layer, value_layer, attention_mask
)
# =================
# Output. [sq, b, h]
# =================
output, bias = self.dense(context_layer)
return output, bias
def bias_dropout_add(x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
out = residual + out
return out
def get_bias_dropout_add(training):
def _bias_dropout_add(x, bias, residual, prob):
return bias_dropout_add(x, bias, residual, prob, training)
return _bias_dropout_add
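# Minimal usage sketch of the helpers above (tensor shapes are assumptions):
def _example_bias_dropout_add() -> torch.Tensor:
    x = torch.randn(4, 2, 8)                   # e.g. [s, b, h] attention output
    bias = torch.zeros(8)                      # per-hidden-unit bias
    residual = torch.randn(4, 2, 8)            # skip-connection input
    fn = get_bias_dropout_add(training=False)  # dropout is a no-op outside training
    return fn(x, bias.expand_as(residual), residual, 0.1)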
class ParallelTransformerLayer(MegatronModule):
"""A single transformer layer.
Transformer layer takes input with size [s, b, h] and returns an
output of the same size.
"""
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
drop_path_rate=0.0,
):
args = get_args()
super().__init__()
self.layer_number = layer_number
self.layer_type = layer_type
self.apply_residual_connection_post_layernorm = (
args.apply_residual_connection_post_layernorm
)
self.bf16 = args.bf16
self.fp32_residual_connection = args.fp32_residual_connection
# Layernorm on the input data.
self.input_layernorm = LayerNorm(
args.hidden_size,
eps=args.layernorm_epsilon,
# no_persist_layer_norm=args.no_persist_layer_norm,
sequence_parallel_enabled=args.sequence_parallel,
)
# Self attention.
self.self_attention = ParallelAttention(
init_method,
output_layer_init_method,
layer_number,
attention_type=AttnType.self_attn,
attn_mask_type=self_attn_mask_type,
)
self.hidden_dropout = args.hidden_dropout
self.bias_dropout_fusion = args.bias_dropout_fusion
# note(mkozuki)
# self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else None
assert drop_path_rate <= 0.0
self.drop_path = None
# Layernorm on the attention output
self.post_attention_layernorm = LayerNorm(
args.hidden_size,
eps=args.layernorm_epsilon,
# no_persist_layer_norm=args.no_persist_layer_norm,
sequence_parallel_enabled=args.sequence_parallel,
)
if self.layer_type == LayerType.decoder:
self.inter_attention = ParallelAttention(
init_method,
output_layer_init_method,
layer_number,
attention_type=AttnType.cross_attn,
)
# Layernorm on the attention output.
self.post_inter_attention_layernorm = LayerNorm(
args.hidden_size,
eps=args.layernorm_epsilon,
# no_persist_layer_norm=args.no_persist_layer_norm,
sequence_parallel_enabled=args.sequence_parallel,
)
# MLP
# note(mkozuki)
assert args.num_experts is None
# if args.num_experts is not None:
# self.mlp = SwitchMLP(init_method, output_layer_init_method)
# else:
# self.mlp = ParallelMLP(init_method, output_layer_init_method)
self.mlp = ParallelMLP(init_method, output_layer_init_method)
# Set bias+dropout+add fusion grad_enable execution handler.
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)
self.bias_dropout_add_exec_handler = (
contextlib.nullcontext if use_nvfuser else torch.enable_grad
)
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
inference_params=None,
):
# hidden_states: [s, b, h]
# Layer norm at the beginning of the transformer layer.
layernorm_output = self.input_layernorm(hidden_states)
# Self attention.
attention_output, attention_bias = self.self_attention(
layernorm_output, attention_mask, inference_params=inference_params
)
# Residual connection.
if self.apply_residual_connection_post_layernorm:
residual = layernorm_output
else:
residual = hidden_states
if self.drop_path is None:
bias_dropout_add_func = get_bias_dropout_add(self.training)
with self.bias_dropout_add_exec_handler():
layernorm_input = bias_dropout_add_func(
attention_output,
attention_bias.expand_as(residual),
residual,
self.hidden_dropout,
)
else:
out = torch.nn.functional.dropout(
attention_output + attention_bias,
p=self.hidden_dropout,
training=self.training,
)
layernorm_input = residual + self.drop_path(out)
# Layer norm post the self attention.
layernorm_output = self.post_attention_layernorm(layernorm_input)
if self.layer_type == LayerType.decoder:
attention_output, attention_bias = self.inter_attention(
layernorm_output, enc_dec_attn_mask, encoder_output=encoder_output
)
# residual connection
if self.apply_residual_connection_post_layernorm:
residual = layernorm_output
else:
residual = layernorm_input
with self.bias_dropout_add_exec_handler():
layernorm_input = bias_dropout_add_func(
attention_output,
attention_bias.expand_as(residual),
residual,
self.hidden_dropout,
)
# Layer norm post the decoder attention
layernorm_output = self.post_inter_attention_layernorm(layernorm_input)
# MLP.
mlp_output, mlp_bias = self.mlp(layernorm_output)
# Second residual connection.
if self.apply_residual_connection_post_layernorm:
residual = layernorm_output
else:
residual = layernorm_input
if self.drop_path is None:
with self.bias_dropout_add_exec_handler():
output = bias_dropout_add_func(
mlp_output,
mlp_bias.expand_as(residual),
residual,
self.hidden_dropout,
)
else:
out = torch.nn.functional.dropout(
mlp_output + mlp_bias, p=self.hidden_dropout, training=self.training
)
output = residual + self.drop_path(out)
return output
class ParallelTransformer(MegatronModule):
"""Transformer class."""
def __init__(
self,
init_method,
output_layer_init_method,
layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
post_layer_norm=True,
pre_process=True,
post_process=True,
drop_path_rate=0.0,
):
super().__init__()
args = get_args()
self.layer_type = layer_type
self.model_type = args.model_type
self.bf16 = args.bf16
self.fp32_residual_connection = args.fp32_residual_connection
self.post_layer_norm = post_layer_norm
self.pre_process = pre_process
self.post_process = post_process
self.input_tensor = None
self.drop_path_rate = drop_path_rate
        # Store activation checkpointing flag.
self.recompute_granularity = args.recompute_granularity
self.recompute_method = args.recompute_method
self.recompute_num_layers = args.recompute_num_layers
self.distribute_saved_activations = (
args.distribute_saved_activations and not args.sequence_parallel
)
self.sequence_parallel = args.sequence_parallel
# Number of layers.
self.num_layers = get_num_layers(
args, args.model_type == ModelType.encoder_and_decoder
)
self.drop_path_rates = [
rate.item()
for rate in torch.linspace(0, self.drop_path_rate, args.num_layers)
]
# Transformer layers.
def build_layer(layer_number):
return ParallelTransformerLayer(
init_method,
output_layer_init_method,
layer_number,
layer_type=layer_type,
self_attn_mask_type=self_attn_mask_type,
drop_path_rate=self.drop_path_rates[layer_number - 1],
)
if args.virtual_pipeline_model_parallel_size is not None:
assert args.num_layers % args.virtual_pipeline_model_parallel_size == 0, (
"num_layers_per_stage must be divisible by "
"virtual_pipeline_model_parallel_size"
)
assert args.model_type != ModelType.encoder_and_decoder
# Number of layers in each model chunk is the number of layers in the stage,
# divided by the number of model chunks in a stage.
self.num_layers = (
self.num_layers // args.virtual_pipeline_model_parallel_size
)
# With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0] [2] [4] [6]
# Stage 1: [1] [3] [5] [7]
# With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0, 1] [4, 5]
# Stage 1: [2, 3] [6, 7]
offset = parallel_state.get_virtual_pipeline_model_parallel_rank() * (
args.num_layers // args.virtual_pipeline_model_parallel_size
) + (parallel_state.get_pipeline_model_parallel_rank() * self.num_layers)
else:
# Each stage gets a contiguous set of layers.
if (
args.model_type == ModelType.encoder_and_decoder
and parallel_state.get_pipeline_model_parallel_world_size() > 1
):
pipeline_rank = parallel_state.get_pipeline_model_parallel_rank()
if layer_type == LayerType.encoder:
offset = pipeline_rank * self.num_layers
else:
num_ranks_in_enc = args.pipeline_model_parallel_split_rank
offset = (pipeline_rank - num_ranks_in_enc) * self.num_layers
else:
offset = (
parallel_state.get_pipeline_model_parallel_rank() * self.num_layers
)
if self.num_layers == 0:
# When a standalone embedding stage is used (e.g.,
# args.standalone_embedding_stage == True), virtual pipeline ranks
# on pipeline rank 0 will have zero transformer layers assigned to
# them. This results in the model's input and output tensors to be
# the same, which will cause failure for certain output tensor
# optimizations (e.g., pipeline output deallocation). To remedy
# this, we assign a 'no-op' layer on these ranks, which will
# disconnect the input tensor from the output tensor.
self.num_layers = 1
self.layers = torch.nn.ModuleList([NoopTransformerLayer(1)])
else:
self.layers = torch.nn.ModuleList(
[build_layer(i + 1 + offset) for i in range(self.num_layers)]
)
if self.post_process and self.post_layer_norm:
# Final layer norm before output.
self.final_layernorm = LayerNorm(
args.hidden_size,
eps=args.layernorm_epsilon,
# no_persist_layer_norm=args.no_persist_layer_norm,
sequence_parallel_enabled=args.sequence_parallel,
)
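    # Worked example of the interleaved offset computed in __init__ above
    # (numbers are illustrative): with num_layers=8, 2 pipeline stages, and
    # virtual_pipeline_model_parallel_size=2, each model chunk holds
    # 8 // 2 // 2 = 2 layers and offset = virtual_rank * 4 + pipeline_rank * 2,
    # so stage 0 builds chunks [0, 1] and [4, 5] while stage 1 builds
    # [2, 3] and [6, 7], matching the assignment sketched in the comment.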
def _get_layer(self, layer_number):
return self.layers[layer_number]
def _checkpointed_forward(
self, hidden_states, attention_mask, encoder_output, enc_dec_attn_mask
):
"""Forward method with activation checkpointing."""
def custom(start, end):
def custom_forward(*inputs):
x_ = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
for index in range(start, end):
layer = self._get_layer(index)
x_ = layer(x_, attention_mask, encoder_output, enc_dec_attn_mask)
return x_
return custom_forward
if self.recompute_method == "uniform":
# Uniformly divide the total number of Transformer layers and checkpoint
# the input activation of each divided chunk.
            # A method to further reduce memory usage by storing fewer checkpoints.
l = 0
while l < self.num_layers:
hidden_states = tensor_parallel.random.checkpoint(
custom(l, l + self.recompute_num_layers),
self.distribute_saved_activations,
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
)
l += self.recompute_num_layers
elif self.recompute_method == "block":
# Checkpoint the input activation of only a set number of individual
# Transformer layers and skip the rest.
            # A method that fully uses the device memory by removing redundant re-computation.
for l in range(self.num_layers):
if l < self.recompute_num_layers:
hidden_states = tensor_parallel.random.checkpoint(
custom(l, l + 1),
self.distribute_saved_activations,
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
)
else:
hidden_states = custom(l, l + 1)(
hidden_states, attention_mask, encoder_output, enc_dec_attn_mask
)
else:
raise ValueError("Invalid activation recompute method.")
return hidden_states
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
inference_params=None,
):
# hidden_states: [s, b, h]
# Checks.
if inference_params:
assert (
self.recompute_granularity is None
), "inference does not work with activation checkpointing"
if not self.pre_process:
# See set_input_tensor()
hidden_states = self.input_tensor
# Viewless tensor.
# - We only need to create a viewless tensor in the case of micro batch
# size (mbs) == 1, since in this case, 'hidden_states.transpose()'
# above creates a view tensor, and '.contiguous()' is a pass-through.
# For mbs >= 2, '.contiguous()' creates a new tensor, eliminating
# the need to make it viewless.
#
# However, we don't explicitly check mbs == 1 here because
# make_viewless_tensor() has negligible overhead when its input
# is already viewless.
#
# - For the 'else' case above, calling make_viewless_tensor() here is
# likely redundant, since p2p_communication.py (likely originator)
# already creates viewless tensors. That said, make_viewless_tensor()
# is called here to be future-proof and corner-case-proof.
# hidden_states = mpu.make_viewless_tensor(hidden_states, requires_grad=True, keep_graph=True)
if self.sequence_parallel:
rng_context = tensor_parallel.get_cuda_rng_tracker().fork()
else:
rng_context = contextlib.nullcontext()
with rng_context:
# Forward pass.
if self.recompute_granularity == "full":
hidden_states = self._checkpointed_forward(
hidden_states, attention_mask, encoder_output, enc_dec_attn_mask
)
else:
for index in range(self.num_layers):
layer = self._get_layer(index)
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
)
# Final layer norm.
if self.post_process and self.post_layer_norm:
hidden_states = self.final_layernorm(hidden_states)
return hidden_states
def get_num_layers(args, is_encoder_and_decoder_model):
"""Compute the number of transformer layers resident on the current rank."""
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
if is_encoder_and_decoder_model:
assert args.pipeline_model_parallel_split_rank is not None
# When a standalone embedding stage is used, a rank is taken from
# the encoder's ranks, to be used for the encoder's embedding
# layer. This way, the rank referenced by the 'split rank' remains
# the same whether or not a standalone embedding stage is used.
num_ranks_in_encoder = (
args.pipeline_model_parallel_split_rank - 1
if args.standalone_embedding_stage
else args.pipeline_model_parallel_split_rank
)
num_ranks_in_decoder = (
args.transformer_pipeline_model_parallel_size - num_ranks_in_encoder
)
assert args.num_layers % num_ranks_in_encoder == 0, (
"num_layers (%d) must be divisible by number of ranks given to encoder (%d)"
% (
args.num_layers,
num_ranks_in_encoder,
)
)
assert args.num_layers % num_ranks_in_decoder == 0, (
"num_layers (%d) must be divisible by number of ranks given to decoder (%d)"
% (
args.num_layers,
num_ranks_in_decoder,
)
)
if parallel_state.is_pipeline_stage_before_split():
num_layers = (
0
if args.standalone_embedding_stage
and parallel_state.get_pipeline_model_parallel_rank() == 0
else args.num_layers // num_ranks_in_encoder
)
else:
num_layers = args.num_layers // num_ranks_in_decoder
else:
assert (
args.num_layers % args.transformer_pipeline_model_parallel_size == 0
), "num_layers must be divisible by transformer_pipeline_model_parallel_size"
# When a standalone embedding stage is used, all transformer layers
# are divided among pipeline rank >= 1, while on pipeline rank 0,
# ranks either contain the input embedding layer (virtual pp rank 0),
# or no layers at all (virtual pp rank >= 1).
num_layers = (
0
if args.standalone_embedding_stage
and parallel_state.get_pipeline_model_parallel_rank() == 0
else args.num_layers // args.transformer_pipeline_model_parallel_size
)
else:
num_layers = args.num_layers
return num_layers
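# Worked example for get_num_layers (numbers are illustrative): with
# num_layers=24, a pipeline-parallel world size of 4, a decoder-only model, and
# no standalone embedding stage, transformer_pipeline_model_parallel_size is 4
# and every pipeline rank is assigned 24 // 4 = 6 transformer layers.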
class NoopTransformerLayer(MegatronModule):
"""A single 'no-op' transformer layer.
The sole purpose of this layer is for when a standalone embedding layer
is used (i.e., args.standalone_embedding_stage == True). In this case,
zero transformer layers are assigned when pipeline rank == 0. Additionally,
when virtual pipeline rank >= 1, zero total model parameters are created
(virtual rank 0 contains the input embedding). This results in the model's
input and output tensors being the same, which causes an error when
    performing certain memory optimizations on the output tensor (e.g.,
deallocating it). Thus, this layer disconnects the input from the output
via a clone. Since ranks containing a no-op layer are generally under-
utilized (both compute and memory), there's no worry of any performance
    degradation.
"""
def __init__(self, layer_number):
super().__init__()
self.layer_number = layer_number
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
inference_params=None,
):
return hidden_states.clone()
def parallel_lm_logits(input_, word_embeddings_weight, parallel_output, bias=None):
"""LM logits using word embedding weights."""
args = get_args()
# Parallel logits.
if args.async_tensor_model_parallel_allreduce or args.sequence_parallel:
input_parallel = input_
model_parallel = parallel_state.get_tensor_model_parallel_world_size() > 1
async_grad_allreduce = (
args.async_tensor_model_parallel_allreduce
and model_parallel
and not args.sequence_parallel
)
else:
input_parallel = tensor_parallel.copy_to_tensor_model_parallel_region(input_)
async_grad_allreduce = False
# Matrix multiply.
# logits_parallel = tensor_parallel.layers.LinearWithGradAccumulationAndAsyncCommunication.apply(
# input_parallel, word_embeddings_weight, bias, args.gradient_accumulation_fusion, async_grad_allreduce, args.sequence_parallel)
logits_parallel = (
tensor_parallel.layers.linear_with_grad_accumulation_and_async_allreduce(
input_parallel,
word_embeddings_weight,
bias,
args.gradient_accumulation_fusion,
async_grad_allreduce,
args.sequence_parallel,
)
)
# Gather if needed.
if parallel_output:
return logits_parallel
return tensor_parallel.gather_from_tensor_model_parallel_region(logits_parallel)
def get_language_model(
num_tokentypes,
add_pooler,
encoder_attn_mask_type,
init_method=None,
scaled_init_method=None,
add_encoder=True,
add_decoder=False,
decoder_attn_mask_type=AttnMaskType.causal,
pre_process=True,
post_process=True,
):
"""Build language model and return along with the key to save."""
args = get_args()
if init_method is None:
init_method = init_method_normal(args.init_method_std)
if scaled_init_method is None:
scaled_init_method = scaled_init_method_normal(
args.init_method_std, args.num_layers
)
# Language model.
language_model = TransformerLanguageModel(
init_method,
scaled_init_method,
encoder_attn_mask_type,
num_tokentypes=num_tokentypes,
add_encoder=add_encoder,
add_decoder=add_decoder,
decoder_attn_mask_type=decoder_attn_mask_type,
add_pooler=add_pooler,
pre_process=pre_process,
post_process=post_process,
)
# key used for checkpoints.
language_model_key = "language_model"
return language_model, language_model_key
class Pooler(MegatronModule):
"""Pooler layer.
Pool hidden states of a specific token (for example start of the
sequence) and add a linear transformation followed by a tanh.
Arguments:
hidden_size: hidden size
init_method: weight initialization method for the linear layer.
bias is set to zero.
"""
def __init__(self, hidden_size, init_method):
super().__init__()
args = get_args()
self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
self.sequence_parallel = args.sequence_parallel
def forward(self, hidden_states, sequence_index=0):
# hidden_states: [s, b, h]
# sequence_index: index of the token to pool.
# gather data along sequence dimensions
# same pooler is run on all tensor parallel nodes
if self.sequence_parallel:
hidden_states = tensor_parallel.mappings.gather_from_sequence_parallel_region(hidden_states)
pooled = hidden_states[sequence_index, :, :]
pooled = self.dense(pooled)
pooled = torch.tanh(pooled)
return pooled
class Embedding(MegatronModule):
"""Language model embeddings.
Arguments:
hidden_size: hidden size
vocab_size: vocabulary size
max_sequence_length: maximum size of sequence. This
is used for positional embedding
embedding_dropout_prob: dropout probability for embeddings
init_method: weight initialization method
num_tokentypes: size of the token-type embeddings. 0 value
will ignore this embedding
"""
def __init__(
self,
hidden_size,
vocab_size,
max_sequence_length,
embedding_dropout_prob,
init_method,
num_tokentypes=0,
):
super().__init__()
self.hidden_size = hidden_size
self.init_method = init_method
self.num_tokentypes = num_tokentypes
args = get_args()
# Word embeddings (parallel).
self.word_embeddings = VocabParallelEmbedding(
vocab_size, self.hidden_size, init_method=self.init_method
)
self._word_embeddings_key = "word_embeddings"
# Position embedding (serial).
self.position_embeddings = torch.nn.Embedding(
max_sequence_length, self.hidden_size
)
self._position_embeddings_key = "position_embeddings"
# Initialize the position embeddings.
self.init_method(self.position_embeddings.weight)
# Token type embedding.
# Add this as an optional field that can be added through
# method call so we can load a pretrain model without
# token types and add them as needed.
self._tokentype_embeddings_key = "tokentype_embeddings"
if self.num_tokentypes > 0:
self.tokentype_embeddings = torch.nn.Embedding(
self.num_tokentypes, self.hidden_size
)
# Initialize the token-type embeddings.
self.init_method(self.tokentype_embeddings.weight)
else:
self.tokentype_embeddings = None
self.fp32_residual_connection = args.fp32_residual_connection
self.sequence_parallel = args.sequence_parallel
# Embeddings dropout
self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
def zero_parameters(self):
"""Zero out all parameters in embedding."""
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
self.position_embeddings.weight.data.fill_(0)
self.position_embeddings.weight.shared = True
if self.num_tokentypes > 0:
self.tokentype_embeddings.weight.fill_(0)
self.tokentype_embeddings.weight.shared = True
def add_tokentype_embeddings(self, num_tokentypes):
"""Add token-type embedding. This function is provided so we can add
token-type embeddings in case the pretrained model does not have it.
This allows us to load the model normally and then add this embedding.
"""
if self.tokentype_embeddings is not None:
raise Exception("tokentype embeddings is already initialized")
if torch.distributed.get_rank() == 0:
print(
"adding embedding for {} tokentypes".format(num_tokentypes), flush=True
)
self.num_tokentypes = num_tokentypes
self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)
# Initialize the token-type embeddings.
self.init_method(self.tokentype_embeddings.weight)
def forward(self, input_ids, position_ids, tokentype_ids=None):
# Embeddings.
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
if tokentype_ids is not None:
assert self.tokentype_embeddings is not None
embeddings = embeddings + self.tokentype_embeddings(tokentype_ids)
else:
assert self.tokentype_embeddings is None
        # Data format change to avoid explicit transposes: [b s h] --> [s b h].
embeddings = embeddings.transpose(0, 1).contiguous()
        # If the input flag for fp32 residual connection is set, convert to float.
if self.fp32_residual_connection:
embeddings = embeddings.float()
# Dropout.
if self.sequence_parallel:
embeddings = scatter_to_sequence_parallel_region(embeddings)
with tensor_parallel.get_cuda_rng_tracker().fork():
embeddings = self.embedding_dropout(embeddings)
else:
embeddings = self.embedding_dropout(embeddings)
return embeddings
class TransformerLanguageModel(MegatronModule):
"""Transformer language model.
Arguments:
transformer_hparams: transformer hyperparameters
vocab_size: vocabulary size
max_sequence_length: maximum size of sequence. This
is used for positional embedding
embedding_dropout_prob: dropout probability for embeddings
num_tokentypes: size of the token-type embeddings. 0 value
will ignore this embedding
"""
def __init__(
self,
init_method,
output_layer_init_method,
encoder_attn_mask_type,
num_tokentypes=0,
add_encoder=True,
add_decoder=False,
decoder_attn_mask_type=AttnMaskType.causal,
add_pooler=False,
pre_process=True,
post_process=True,
):
super().__init__()
args = get_args()
self.pre_process = pre_process
self.post_process = post_process
self.hidden_size = args.hidden_size
self.num_tokentypes = num_tokentypes
self.init_method = init_method
self.add_encoder = add_encoder
self.encoder_attn_mask_type = encoder_attn_mask_type
self.add_decoder = add_decoder
self.decoder_attn_mask_type = decoder_attn_mask_type
self.add_pooler = add_pooler
self.encoder_hidden_state = None
# Embeddings.
if self.pre_process:
self.embedding = Embedding(
self.hidden_size,
args.padded_vocab_size,
args.max_position_embeddings,
args.hidden_dropout,
self.init_method,
self.num_tokentypes,
)
self._embedding_key = "embedding"
# Transformer.
# Encoder (usually set to True, False if part of an encoder-decoder
# architecture and in encoder-only stage).
if self.add_encoder:
self.encoder = ParallelTransformer(
self.init_method,
output_layer_init_method,
self_attn_mask_type=self.encoder_attn_mask_type,
pre_process=self.pre_process,
post_process=self.post_process,
)
self._encoder_key = "encoder"
else:
self.encoder = None
# Decoder (usually set to False, True if part of an encoder-decoder
# architecture and in decoder-only stage).
if self.add_decoder:
self.decoder = ParallelTransformer(
self.init_method,
output_layer_init_method,
layer_type=LayerType.decoder,
self_attn_mask_type=self.decoder_attn_mask_type,
pre_process=self.pre_process,
post_process=self.post_process,
)
self._decoder_key = "decoder"
else:
self.decoder = None
if self.post_process:
# Pooler.
if self.add_pooler:
self.pooler = Pooler(self.hidden_size, self.init_method)
self._pooler_key = "pooler"
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
# This is usually handled in schedules.py but some inference code still
# gives us non-lists or None
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
if self.add_encoder and self.add_decoder:
assert (
len(input_tensor) == 1
), "input_tensor should only be length 1 for stage with both encoder and decoder"
self.encoder.set_input_tensor(input_tensor[0])
elif self.add_encoder:
assert (
len(input_tensor) == 1
), "input_tensor should only be length 1 for stage with only encoder"
self.encoder.set_input_tensor(input_tensor[0])
elif self.add_decoder:
if len(input_tensor) == 2:
self.decoder.set_input_tensor(input_tensor[0])
self.encoder_hidden_state = input_tensor[1]
elif len(input_tensor) == 1:
self.decoder.set_input_tensor(None)
self.encoder_hidden_state = input_tensor[0]
else:
raise Exception("input_tensor must have either length 1 or 2")
else:
raise Exception("Stage must have at least either encoder or decoder")
def forward(
self,
enc_input_ids,
enc_position_ids,
enc_attn_mask,
dec_input_ids=None,
dec_position_ids=None,
dec_attn_mask=None,
enc_dec_attn_mask=None,
tokentype_ids=None,
inference_params=None,
pooling_sequence_index=0,
enc_hidden_states=None,
output_enc_hidden=False,
):
args = get_args()
# Encoder embedding.
if self.pre_process:
encoder_input = self.embedding(
enc_input_ids, enc_position_ids, tokentype_ids=tokentype_ids
)
else:
encoder_input = None
# Run encoder.
if enc_hidden_states is None:
if self.encoder is not None:
encoder_output = self.encoder(
encoder_input, enc_attn_mask, inference_params=inference_params
)
else:
encoder_output = self.encoder_hidden_state
else:
encoder_output = enc_hidden_states.to(encoder_input.dtype)
if self.post_process:
if self.add_pooler:
pooled_output = self.pooler(encoder_output, pooling_sequence_index)
        # output_enc_hidden is used when we only need the encoder's output, for
        # example to compute similarity between two sequences by average pooling.
if not self.add_decoder or output_enc_hidden:
if self.add_pooler and self.post_process:
return encoder_output, pooled_output
else:
return encoder_output
# Decoder embedding.
if self.pre_process:
decoder_input = self.embedding(dec_input_ids, dec_position_ids)
else:
decoder_input = None
# Run decoder.
decoder_output = self.decoder(
decoder_input,
dec_attn_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
)
if self.add_pooler and self.post_process:
return decoder_output, encoder_output, pooled_output
else:
return decoder_output, encoder_output
def post_language_model_processing(
lm_output, labels, logit_weights, parallel_output, fp16_lm_cross_entropy
):
# Output.
output = parallel_lm_logits(lm_output, logit_weights, parallel_output)
if labels is None:
return output
else:
if fp16_lm_cross_entropy:
assert output.dtype == torch.half
loss = tensor_parallel.vocab_parallel_cross_entropy(output, labels)
else:
loss = tensor_parallel.vocab_parallel_cross_entropy(output.float(), labels)
return loss
def module_size(m: torch.nn.Module, only_trainable: bool = False):
"""
returns the total number of parameters used by `m` (only counting
shared parameters once); if `only_trainable` is True, then only
includes parameters with `requires_grad = True`
"""
parameters = list(m.parameters())
if only_trainable:
parameters = [p for p in parameters if p.requires_grad]
unique = {p.data_ptr(): p for p in parameters}.values()
return sum(p.numel() for p in unique)
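# Minimal usage sketch (the Linear layer is only an illustration):
def _example_module_size() -> int:
    layer = torch.nn.Linear(16, 16)  # 16 * 16 weights + 16 biases = 272 parameters
    return module_size(layer)        # -> 272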
| GeneSplice-main | GeneSplice/apex/apex/transformer/testing/standalone_transformer_lm.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron global variables."""
import os
import sys
import time
import torch
from apex.transformer.microbatches import build_num_microbatches_calculator
from .arguments import parse_args
_GLOBAL_ARGS = None
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
_GLOBAL_TOKENIZER = None
_GLOBAL_TENSORBOARD_WRITER = None
_GLOBAL_ADLR_AUTORESUME = None
_GLOBAL_TIMERS = None
def get_args():
"""Return arguments."""
_ensure_var_is_initialized(_GLOBAL_ARGS, 'args')
return _GLOBAL_ARGS
def get_num_microbatches() -> int:
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get()
def get_current_global_batch_size() -> int:
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size()
def update_num_microbatches(consumed_samples: int, *, consistency_check: bool = True) -> None:
"""Update the number of microbatches upon the number of consumed samples.
.. note::
This function has no effect unless ``rampup_batch_size`` is set.
Args:
consumed_samples: The number of consumed samples so far. Basically this is equal to
:math:`num_iter * global_batch_size`.
consistency_check: If :obj:`True`, sanity checks the consumed samples, i.e., check if
``consumed_samples`` is divisible by :math:`micro_batch_size \times data_parallel_size`.
"""
_GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples, consistency_check)
# def get_tokenizer():
# """Return tokenizer."""
# _ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
# return _GLOBAL_TOKENIZER
def get_tensorboard_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_TENSORBOARD_WRITER
def get_adlr_autoresume():
"""ADLR autoresume object. It can be None so no need
to check if it is initialized."""
return _GLOBAL_ADLR_AUTORESUME
def get_timers():
"""Return timers."""
_ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')
return _GLOBAL_TIMERS
def set_global_variables(extra_args_provider=None, args_defaults={}, override_args={},
ignore_unknown_args=False):
"""Set args, tokenizer, tensorboard-writer, adlr-autoresume, and timers."""
args = _parse_args(extra_args_provider=extra_args_provider,
defaults=args_defaults,
override_args=override_args,
ignore_unknown_args=ignore_unknown_args)
# _build_num_microbatches_calculator(args)
# if args.vocab_file:
# _ = _build_tokenizer(args)
_set_tensorboard_writer(args)
_set_adlr_autoresume(args)
_set_timers()
def _parse_args(extra_args_provider=None, defaults={}, override_args={},
ignore_unknown_args=False):
"""Parse entire arguments."""
global _GLOBAL_ARGS
_ensure_var_is_not_initialized(_GLOBAL_ARGS, 'args')
_GLOBAL_ARGS = parse_args(extra_args_provider=extra_args_provider,
defaults=defaults,
override_args=override_args,
ignore_unknown_args=ignore_unknown_args)
return _GLOBAL_ARGS
def _build_num_microbatches_calculator(args):
global _GLOBAL_NUM_MICROBATCHES_CALCULATOR
_ensure_var_is_not_initialized(_GLOBAL_NUM_MICROBATCHES_CALCULATOR,
'num microbatches calculator')
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = build_num_microbatches_calculator(
args)
# def _build_tokenizer(args):
# """Initialize tokenizer."""
# global _GLOBAL_TOKENIZER
# _ensure_var_is_not_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
# _GLOBAL_TOKENIZER = build_tokenizer(args)
# return _GLOBAL_TOKENIZER
# def rebuild_tokenizer(args):
# global _GLOBAL_TOKENIZER
# _GLOBAL_TOKENIZER = None
# return _build_tokenizer(args)
def _set_tensorboard_writer(args):
"""Set tensorboard writer."""
global _GLOBAL_TENSORBOARD_WRITER
_ensure_var_is_not_initialized(_GLOBAL_TENSORBOARD_WRITER,
'tensorboard writer')
if hasattr(args, 'tensorboard_dir') and \
args.tensorboard_dir and args.rank == (args.world_size - 1):
try:
from torch.utils.tensorboard import SummaryWriter
print('> setting tensorboard ...')
_GLOBAL_TENSORBOARD_WRITER = SummaryWriter(
log_dir=args.tensorboard_dir,
max_queue=args.tensorboard_queue_size)
except ModuleNotFoundError:
print('WARNING: TensorBoard writing requested but is not '
'available (are you using PyTorch 1.1.0 or later?), '
'no TensorBoard logs will be written.', flush=True)
def _set_adlr_autoresume(args):
"""Initialize ADLR autoresume."""
global _GLOBAL_ADLR_AUTORESUME
_ensure_var_is_not_initialized(_GLOBAL_ADLR_AUTORESUME, 'adlr autoresume')
if args.adlr_autoresume:
if args.rank == 0:
print('enabling autoresume ...', flush=True)
sys.path.append(os.environ.get('SUBMIT_SCRIPTS', '.'))
try:
from userlib.auto_resume import AutoResume
except BaseException:
print('ADLR autoresume is not available, exiting ...')
sys.exit()
_GLOBAL_ADLR_AUTORESUME = AutoResume
def _set_timers():
"""Initialize timers."""
global _GLOBAL_TIMERS
_ensure_var_is_not_initialized(_GLOBAL_TIMERS, 'timers')
_GLOBAL_TIMERS = Timers()
def _ensure_var_is_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is not None, '{} is not initialized.'.format(name)
def _ensure_var_is_not_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is None, '{} is already initialized.'.format(name)
class _Timer:
"""Timer."""
def __init__(self, name):
self.name_ = name
self.elapsed_ = 0.0
self.started_ = False
self.start_time = time.time()
def start(self):
"""Start the timer."""
assert not self.started_, 'timer has already been started'
torch.cuda.synchronize()
self.start_time = time.time()
self.started_ = True
def stop(self):
"""Stop the timer."""
assert self.started_, 'timer is not started'
torch.cuda.synchronize()
self.elapsed_ += (time.time() - self.start_time)
self.started_ = False
def reset(self):
"""Reset timer."""
self.elapsed_ = 0.0
self.started_ = False
def elapsed(self, reset=True):
"""Calculate the elapsed time."""
started_ = self.started_
        # If timing is in progress, end it first.
if self.started_:
self.stop()
# Get the elapsed time.
elapsed_ = self.elapsed_
# Reset the elapsed time
if reset:
self.reset()
# If timing was in progress, set it back.
if started_:
self.start()
return elapsed_
class Timers:
"""Group of timers."""
def __init__(self):
self.timers = {}
def __call__(self, name):
if name not in self.timers:
self.timers[name] = _Timer(name)
return self.timers[name]
def write(self, names, writer, iteration, normalizer=1.0, reset=False):
"""Write timers to a tensorboard writer"""
# currently when using add_scalars,
# torch.utils.add_scalars makes each timer its own run, which
        # pollutes the runs list, so we just add each as a scalar
assert normalizer > 0.0
for name in names:
value = self.timers[name].elapsed(reset=reset) / normalizer
writer.add_scalar(name + '-time', value, iteration)
def log(self, names, normalizer=1.0, reset=True):
"""Log a group of timers."""
assert normalizer > 0.0
string = 'time (ms)'
for name in names:
elapsed_time = self.timers[name].elapsed(
reset=reset) * 1000.0 / normalizer
string += ' | {}: {:.2f}'.format(name, elapsed_time)
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == (
torch.distributed.get_world_size() - 1):
print(string, flush=True)
else:
print(string, flush=True)
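# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original apex
# file): a minimal way to drive the Timers group defined above. It assumes a
# CUDA device, since _Timer.start()/stop() call torch.cuda.synchronize(), and
# execution as a module (e.g. `python -m ...global_vars`) so the relative
# import of parse_args resolves. The timer names are arbitrary example keys.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    if torch.cuda.is_available():
        timers = Timers()
        timers("fwd").start()
        # ... a forward pass would normally run here ...
        timers("fwd").stop()
        timers("bwd").start()
        # ... a backward pass would normally run here ...
        timers("bwd").stop()
        # Prints e.g. "time (ms) | fwd: 0.05 | bwd: 0.02"; when
        # torch.distributed is initialized, only the last rank prints.
        timers.log(["fwd", "bwd"])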
| GeneSplice-main | GeneSplice/apex/apex/transformer/testing/global_vars.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import torch
from apex.transformer import parallel_state
class GradScaler(torch.cuda.amp.GradScaler):
"""
    Gradient scaler for model-parallel inf check. Infs in gradients are checked across model-parallel
    ranks in (1) the optimizer step and (2) the gradient scaler update.
"""
def __init__(
self, init_scale=2.0 ** 16, growth_factor=2.0, backoff_factor=0.5, growth_interval=2000, enabled=True
):
super().__init__(
init_scale=init_scale,
growth_factor=growth_factor,
backoff_factor=backoff_factor,
growth_interval=growth_interval,
enabled=enabled,
)
def _unscale_grads_(self, optimizer, *args):
if getattr(optimizer, "_custom_amp_unscale_grads", False):
return optimizer.unscale_grads(*args)
else:
return super()._unscale_grads_(optimizer, *args)
def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs):
retval = None
found_inf = torch.cuda.FloatTensor([sum(v.item() for v in optimizer_state["found_inf_per_device"].values())])
# Update across all model parallel instances.
torch.distributed.all_reduce(
found_inf, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
)
if found_inf.item() == 0:
retval = optimizer.step(*args, **kwargs)
return retval
def update(self, new_scale=None):
"""
Updates the scale factor.
If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
the scale is multiplied by ``growth_factor`` to increase it.
Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
used directly, it's used to fill GradScaler's internal scale tensor. So if
``new_scale`` was a tensor, later in-place changes to that tensor will not further
affect the scale GradScaler uses internally.)
Args:
new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None): New scale factor.
.. warning::
:meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
been invoked for all optimizers used this iteration.
"""
if not self._enabled:
return
_scale, _growth_tracker = self._check_scale_growth_tracker("update")
if new_scale is not None:
# Accept a new user-defined scale.
if isinstance(new_scale, float):
self._scale.fill_(new_scale) # type: ignore[union-attr]
else:
reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined]
assert new_scale.numel() == 1, reason
assert new_scale.requires_grad is False, reason
self._scale.copy_(new_scale) # type: ignore[union-attr]
else:
# Consume shared inf/nan data collected from optimizers to update the scale.
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
found_infs = [
found_inf.to(device=_scale.device, non_blocking=True)
for state in self._per_optimizer_states.values()
for found_inf in state["found_inf_per_device"].values()
]
assert len(found_infs) > 0, "No inf checks were recorded prior to update."
found_inf_combined = found_infs[0]
# Update across all model parallel instances.
torch.distributed.all_reduce(
found_inf_combined, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
)
if len(found_infs) > 1:
for i in range(1, len(found_infs)):
found_inf = found_infs[i]
# Update across all model parallel instances.
torch.distributed.all_reduce(
found_inf, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
)
found_inf_combined += found_inf
torch._amp_update_scale_(
_scale,
_growth_tracker,
found_inf_combined,
self._growth_factor,
self._backoff_factor,
self._growth_interval,
)
# To prepare for next iteration, clear the data collected from optimizers this iteration.
self._per_optimizer_states = defaultdict(torch.cuda.amp.grad_scaler._refresh_per_optimizer_state)
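# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original apex
# file). The loop mirrors the standard torch.cuda.amp recipe with this
# model-parallel-aware GradScaler dropped in. It assumes torch.distributed and
# apex.transformer.parallel_state are already initialized elsewhere; `model`,
# `optimizer`, `loss_fn` and `loader` are placeholders, not apex API.
#
#     scaler = GradScaler(init_scale=2.0 ** 16)
#     for inputs, targets in loader:
#         optimizer.zero_grad()
#         with torch.cuda.amp.autocast():
#             loss = loss_fn(model(inputs), targets)
#         scaler.scale(loss).backward()
#         scaler.step(optimizer)  # the inf check is all-reduced across the model-parallel group
#         scaler.update()         # the scale update sees the same all-reduced inf flag
# ---------------------------------------------------------------------------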
| GeneSplice-main | GeneSplice/apex/apex/transformer/amp/grad_scaler.py |
from apex.transformer.amp.grad_scaler import GradScaler
__all__ = [
"GradScaler",
]
| GeneSplice-main | GeneSplice/apex/apex/transformer/amp/__init__.py |
from apex.transformer._data._batchsampler import MegatronPretrainingRandomSampler
from apex.transformer._data._batchsampler import MegatronPretrainingSampler
__all__ = [
"MegatronPretrainingRandomSampler",
"MegatronPretrainingSampler",
]
| GeneSplice-main | GeneSplice/apex/apex/transformer/_data/__init__.py |
"""BatchSampler implementations for POC of dynamic batch size or rampup_batch_size support.
Implementations are based on https://github.com/NVIDIA/Megatron-LM/blob/bcd605f8570ebeeb0436c115ebbfafc3c5a40ae5/megatron/data/data_samplers.py.
""" # NOQA
import abc
import torch
__all__ = [
"MegatronPretrainingSampler",
"MegatronPretrainingRandomSampler",
]
class _Base:
"""Base class for Megatron style BatchSampler."""
@abc.abstractmethod
def __len__(self) -> int:
...
@abc.abstractmethod
def __iter__(self):
...
@property
@abc.abstractmethod
def local_minibatch_size(self) -> int:
...
@local_minibatch_size.setter
    @abc.abstractmethod
    def local_minibatch_size(self, new_local_minibatch_size: int) -> None:
...
class MegatronPretrainingSampler(_Base):
def __init__(
self,
total_samples: int,
consumed_samples: int,
local_minibatch_size: int,
data_parallel_rank: int,
data_parallel_size: int,
drop_last: bool = True,
):
# Sanity checks.
        if total_samples <= 0:
            raise RuntimeError('no sample to consume: {}'.format(total_samples))
        if consumed_samples >= total_samples:
            raise RuntimeError('no samples left to consume: {}, {}'.format(consumed_samples, total_samples))
        if local_minibatch_size <= 0:
            raise RuntimeError(f"local minibatch size must be greater than 0: {local_minibatch_size}")
        if data_parallel_size <= 0:
            raise RuntimeError(f"data parallel size must be greater than 0: {data_parallel_size}")
        if data_parallel_rank >= data_parallel_size:
            raise RuntimeError('data_parallel_rank should be smaller than data parallel size: {}, {}'.format(data_parallel_rank, data_parallel_size))
# Keep a copy of input params for later use.
self.total_samples = total_samples
self.consumed_samples = consumed_samples
self._local_minibatch_size = local_minibatch_size
self.data_parallel_rank = data_parallel_rank
self.data_parallel_size = data_parallel_size
self.local_minibatch_times_data_parallel_size = self._local_minibatch_size * data_parallel_size
self.drop_last = drop_last
def __len__(self):
return self.total_samples
def get_start_end_idx(self):
start_idx = self.data_parallel_rank * self.local_minibatch_size
end_idx = start_idx + self.local_minibatch_size
return start_idx, end_idx
@property
def local_minibatch_size(self) -> int:
return self._local_minibatch_size
@local_minibatch_size.setter
def local_minibatch_size(self, new_local_minibatch_size) -> None:
self._local_minibatch_size = new_local_minibatch_size
self.local_minibatch_times_data_parallel_size = self._local_minibatch_size * self.data_parallel_size
def __iter__(self):
batch = []
        # The last incomplete batch is dropped unless drop_last is set to False.
for idx in range(self.consumed_samples, self.total_samples):
batch.append(idx)
            if len(batch) == self.local_minibatch_times_data_parallel_size:
start_idx, end_idx = self.get_start_end_idx()
yield batch[start_idx:end_idx]
batch = []
        # Yield the remaining partial batch unless drop_last is set.
if len(batch) > 0 and not self.drop_last:
start_idx, end_idx = self.get_start_end_idx()
yield batch[start_idx:end_idx]
class MegatronPretrainingRandomSampler(_Base):
"""Megatron style Random Batch Sampler.
    The major difference from the reference implementation is that `__iter__` yields a local minibatch, not a microbatch.
    A local minibatch consists of `global_batch_size / data_parallel_size` samples.
Args:
total_samples: The number of data samples, i.e. ``len(dataset)``.
consumed_samples: The number of samples already consumed in pretraining.
local_minibatch_size: The number of data in each batch returned from `__iter__`. Basically
`local_minibatch_size = global_batch_size / data_parallel_size`.
data_parallel_rank:
data_parallel_size:
"""
def __init__(
self,
total_samples: int,
consumed_samples: int,
local_minibatch_size: int,
data_parallel_rank: int,
data_parallel_size: int,
) -> None:
if total_samples <= 0:
raise ValueError(f"no sample to consume: total_samples of {total_samples}")
if local_minibatch_size <= 0:
raise ValueError(f"Invalid local_minibatch_size: {local_minibatch_size}")
if data_parallel_size <= 0:
raise ValueError(f"Invalid data_parallel_size: {data_parallel_size}")
if data_parallel_rank >= data_parallel_size:
raise ValueError(
f"data_parallel_rank should be smaller than data parallel size: {data_parallel_rank} < {data_parallel_size}"
)
# Keep a copy of input params for later use.
self.total_samples = total_samples
self.consumed_samples = consumed_samples
self._local_minibatch_size = local_minibatch_size
self.data_parallel_rank = data_parallel_rank
self.data_parallel_size = data_parallel_size
self.local_minibatch_times_data_parallel_size = self._local_minibatch_size * self.data_parallel_size
self.last_batch_size = self.total_samples % self.local_minibatch_times_data_parallel_size
def __len__(self) -> int:
return self.total_samples
@property
def local_minibatch_size(self) -> int:
return self._local_minibatch_size
@local_minibatch_size.setter
def local_minibatch_size(self, new_local_minibatch_size) -> None:
self._local_minibatch_size = new_local_minibatch_size
self.local_minibatch_times_data_parallel_size = self._local_minibatch_size * self.data_parallel_size
def __iter__(self):
active_total_samples = self.total_samples - self.last_batch_size
self.epoch = self.consumed_samples // active_total_samples
current_epoch_samples = self.consumed_samples % active_total_samples
# note(mkozuki): might be better to uncomment
# assert current_epoch_samples % (self.data_parallel_size * apex.transformer.pipeline_parallel.utils.get_micro_batch_size()) == 0
# data sharding and random sampling
bucket_size = (self.total_samples // self.local_minibatch_times_data_parallel_size) * self.local_minibatch_size
bucket_offset = current_epoch_samples // self.data_parallel_size
start_idx = self.data_parallel_rank * bucket_size
g = torch.Generator()
g.manual_seed(self.epoch)
random_idx = torch.randperm(bucket_size, generator=g).tolist()
idx_range = [start_idx + x for x in random_idx[bucket_offset:]]
batch = []
        # The last batch is dropped if it is incomplete.
for idx in idx_range:
batch.append(idx)
if len(batch) == self.local_minibatch_size:
self.consumed_samples += self.local_minibatch_times_data_parallel_size
yield batch
batch = []
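# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original apex
# file): exercise both samplers with toy numbers. The sizes below are
# arbitrary example values; in real pretraining they come from the
# data-parallel configuration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Sequential sampler: each data-parallel rank slices its own piece out of
    # every accumulated (local_minibatch_size * data_parallel_size) window.
    for rank in range(2):
        sampler = MegatronPretrainingSampler(
            total_samples=8,
            consumed_samples=0,
            local_minibatch_size=2,
            data_parallel_rank=rank,
            data_parallel_size=2,
        )
        # rank 0 -> [[0, 1], [4, 5]], rank 1 -> [[2, 3], [6, 7]]
        print(f"sequential, rank {rank}:", list(sampler))
    # Random sampler: each rank draws from its own shuffled bucket, so the
    # yielded lists are already per-rank local minibatches.
    random_sampler = MegatronPretrainingRandomSampler(
        total_samples=8,
        consumed_samples=0,
        local_minibatch_size=2,
        data_parallel_rank=0,
        data_parallel_size=2,
    )
    print("random, rank 0:", list(random_sampler))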
| GeneSplice-main | GeneSplice/apex/apex/transformer/_data/_batchsampler.py |
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex._autocast_utils import _cast_if_autocast_enabled
from apex.transformer.enums import AttnMaskType
class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
"""
Fused operation which performs following three operations in sequence
1. Scale the tensor.
2. Apply upper triangular mask (typically used in gpt models).
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, scale):
import scaled_upper_triang_masked_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_upper_triang_masked_softmax_cuda.forward(
inputs, scale_t[0]
)
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_upper_triang_masked_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_upper_triang_masked_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None
def scaled_upper_triang_masked_softmax(inputs, _, scale):
b, np, sq, sk = inputs.size()
assert sq == sk, "causal mask is only for self attention"
# Reshaping input to 3D tensor (attn_batches, sq, sk)
inputs = inputs.view(-1, sq, sk)
args = _cast_if_autocast_enabled(inputs, scale)
with torch.cuda.amp.autocast(enabled=False):
probs = ScaledUpperTriangMaskedSoftmax.apply(*args)
return probs.view(b, np, sq, sk)
# NOTE (mkozuki): `ScaledMaskedSoftmax` somehow doesn't work well with `torch.cuda.amp.custom_fwd`.
# Without `cast_inputs` kwarg, somehow inputs are not cast to dtype used in the autocast context.
# So I needed to manually write two `torch.autograd.Function` inheritances.
# Fused operation which performs following three operations in sequence
# 1. Scale the tensor.
# 2. Apply the mask.
# 3. Perform softmax.
class ScaledMaskedSoftmax(torch.autograd.Function):
@staticmethod
def forward(ctx, inputs, mask, scale):
import scaled_masked_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_masked_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_masked_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None, None
def scaled_masked_softmax(inputs, mask, scale):
# input is 4D tensor (b, np, sq, sk)
if mask is not None:
args = _cast_if_autocast_enabled(inputs, mask, scale)
with torch.cuda.amp.autocast(enabled=False):
return ScaledMaskedSoftmax.apply(*args)
else:
args = _cast_if_autocast_enabled(inputs, scale)
with torch.cuda.amp.autocast(enabled=False):
return ScaledSoftmax.apply(*args)
class GenericScaledMaskedSoftmax(torch.autograd.Function):
@staticmethod
def forward(ctx, inputs, mask, scale):
import generic_scaled_masked_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = generic_scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
        import generic_scaled_masked_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = generic_scaled_masked_softmax_cuda.backward(output_grads, softmax_results, scale_t[0])
return input_grads, None, None
def generic_scaled_masked_softmax(inputs, mask, scale):
# input is 4D tensor (b, np, sq, sk)
args = _cast_if_autocast_enabled(inputs, mask, scale)
with torch.cuda.amp.autocast(enabled=False):
return GenericScaledMaskedSoftmax.apply(*args)
class ScaledSoftmax(torch.autograd.Function):
"""
Fused operation which performs following two operations in sequence
1. Scale the tensor.
2. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, scale):
import scaled_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_softmax_cuda.forward(
inputs, scale_t[0]
)
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None, None
class FusedScaleMaskSoftmax(torch.nn.Module):
"""
fused operation: scaling + mask + softmax
Arguments:
input_in_fp16: flag to indicate if input in fp16 data format.
input_in_bf16: flag to indicate if input in bf16 data format.
attn_mask_type: attention mask type (pad or causal)
        scaled_masked_softmax_fusion: flag to indicate whether the user wants to use softmax fusion
        mask_func: mask function to be applied.
        softmax_in_fp32: if true, softmax is performed at fp32 precision.
scale: scaling factor used in input tensor scaling.
"""
def __init__(
self,
input_in_fp16,
input_in_bf16,
attn_mask_type,
scaled_masked_softmax_fusion,
mask_func,
softmax_in_fp32,
scale,
):
super().__init__()
self.input_in_fp16 = input_in_fp16
self.input_in_bf16 = input_in_bf16
if self.input_in_fp16 and self.input_in_bf16:
raise RuntimeError(
"both fp16 and bf16 flags cannot be active at the same time."
)
self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
self.attn_mask_type = attn_mask_type
self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
self.mask_func = mask_func
self.softmax_in_fp32 = softmax_in_fp32
self.scale = scale
if not (self.scale is None or softmax_in_fp32):
raise RuntimeError("softmax should be in fp32 when scaled")
if self.scaled_masked_softmax_fusion:
if self.attn_mask_type == AttnMaskType.causal:
self.fused_softmax_func = scaled_upper_triang_masked_softmax
elif self.attn_mask_type == AttnMaskType.padding:
self.fused_softmax_func = scaled_masked_softmax
else:
raise ValueError("Invalid attn_mask_type.")
def forward(self, input, mask):
# [b, np, sq, sk]
assert input.dim() == 4
if self.is_kernel_available(mask, *input.size()):
return self.forward_fused_softmax(input, mask)
else:
return self.forward_torch_softmax(input, mask)
def is_kernel_available(self, mask, b, np, sq, sk):
attn_batches = b * np
        if (
            self.scaled_masked_softmax_fusion  # user wants to fuse
            and self.input_in_float16  # input must be fp16
            and (
                self.attn_mask_type == AttnMaskType.causal
                or self.attn_mask_type == AttnMaskType.padding
            )
            and 16 < sk <= 16384  # sk must be within 16 ~ 16384
            and sq % 4 == 0  # sq must be divisible by 4
            and sk % 4 == 0  # sk must be divisible by 4
            and attn_batches % 4 == 0  # np * b must be divisible by 4
        ):
if 0 <= sk <= 16384:
batch_per_block = self.get_batch_per_block(sq, sk, b, np)
if self.attn_mask_type == AttnMaskType.causal:
if attn_batches % batch_per_block == 0:
return True
else:
if sq % batch_per_block == 0:
return True
return False
def forward_fused_softmax(self, input, mask):
# input.shape = [b, np, sq, sk]
scale = self.scale if self.scale is not None else 1.0
return self.fused_softmax_func(input, mask, scale)
def forward_torch_softmax(self, input, mask):
if self.input_in_float16 and self.softmax_in_fp32:
input = input.float()
if self.scale is not None:
input = input * self.scale
mask_output = self.mask_func(input, mask) if mask is not None else input
probs = torch.nn.Softmax(dim=-1)(mask_output)
if self.input_in_float16 and self.softmax_in_fp32:
if self.input_in_fp16:
probs = probs.half()
else:
probs = probs.bfloat16()
return probs
@staticmethod
def get_batch_per_block(sq, sk, b, np):
import scaled_masked_softmax_cuda
return scaled_masked_softmax_cuda.get_batch_per_block(sq, sk, b, np)
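# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original apex
# file). FusedScaleMaskSoftmax requires the compiled scaled_*_softmax_cuda
# extensions and a CUDA device, so the example is kept as a comment;
# `attention_scores` is a placeholder [b, np, sq, sk] fp16 tensor.
#
#     softmax = FusedScaleMaskSoftmax(
#         input_in_fp16=True,
#         input_in_bf16=False,
#         attn_mask_type=AttnMaskType.causal,
#         scaled_masked_softmax_fusion=True,
#         mask_func=lambda scores, mask: scores.masked_fill(mask, -10000.0),
#         softmax_in_fp32=True,
#         scale=None,
#     )
#     probs = softmax(attention_scores, None)  # mask may be None for the causal case
# ---------------------------------------------------------------------------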
class GenericFusedScaleMaskSoftmax(FusedScaleMaskSoftmax):
"""
    Generic version of FusedScaleMaskSoftmax.
    It removes the seq-len limitations and has a slight performance degradation compared with FusedScaleMaskSoftmax.
fused operation: scaling + mask + softmax
Arguments:
input_in_fp16: flag to indicate if input in fp16 data format.
input_in_bf16: flag to indicate if input in bf16 data format.
        scaled_masked_softmax_fusion: flag to indicate whether the user wants to use softmax fusion
        mask_func: mask function to be applied.
        softmax_in_fp32: if true, softmax is performed at fp32 precision.
scale: scaling factor used in input tensor scaling.
"""
def __init__(
self, input_in_fp16, input_in_bf16, scaled_masked_softmax_fusion, mask_func, softmax_in_fp32, scale,
):
super().__init__(input_in_fp16, input_in_bf16, AttnMaskType.padding, scaled_masked_softmax_fusion, mask_func, softmax_in_fp32, scale)
        # Route the fused path to the generic (seq-len unconstrained) kernel.
        self.fused_softmax_func = generic_scaled_masked_softmax
def is_kernel_available(self, mask, b, np, sq, sk):
        if self.scaled_masked_softmax_fusion and 0 < sk:  # fusion requested and sk >= 1
return True
        return False
| GeneSplice-main | GeneSplice/apex/apex/transformer/functional/fused_softmax.py |
from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax
__all__ = [
"FusedScaleMaskSoftmax",
]
| GeneSplice-main | GeneSplice/apex/apex/transformer/functional/__init__.py |
from .fp16util import (
BN_convert_float,
network_to_half,
prep_param_lists,
model_grads_to_master_grads,
master_params_to_model_params,
tofp16,
to_python_float,
clip_grad_norm,
convert_module,
convert_network,
FP16Model,
)
from .fp16_optimizer import FP16_Optimizer
from .loss_scaler import LossScaler, DynamicLossScaler
| GeneSplice-main | GeneSplice/apex/apex/fp16_utils/__init__.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
"""
Utility module that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
def BN_convert_float(module):
"""
Utility function for network_to_half().
Retained for legacy purposes.
"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
module.float()
for child in module.children():
BN_convert_float(child)
return module
def network_to_half(network):
"""
Convert model to half precision in a batchnorm-safe way.
Retained for legacy purposes. It is recommended to use FP16Model.
"""
return nn.Sequential(tofp16(), BN_convert_float(network.half()))
def convert_module(module, dtype):
"""
Converts a module's immediate parameters and buffers to dtype.
"""
for param in module.parameters(recurse=False):
if param is not None:
if param.data.dtype.is_floating_point:
param.data = param.data.to(dtype=dtype)
if param._grad is not None and param._grad.data.dtype.is_floating_point:
param._grad.data = param._grad.data.to(dtype=dtype)
for buf in module.buffers(recurse=False):
if buf is not None and buf.data.dtype.is_floating_point:
buf.data = buf.data.to(dtype=dtype)
def convert_network(network, dtype):
"""
Converts a network's parameters and buffers to dtype.
"""
for module in network.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
continue
convert_module(module, dtype)
if isinstance(module, torch.nn.RNNBase) or isinstance(module, torch.nn.modules.rnn.RNNBase):
module.flatten_parameters()
return network
class FP16Model(nn.Module):
"""
Convert model to half precision in a batchnorm-safe way.
"""
def __init__(self, network):
from apex import deprecated_warning
deprecated_warning("apex.fp16_utils is deprecated and will be removed by the end of February 2023. Use [PyTorch AMP](https://pytorch.org/docs/stable/amp.html)")
super(FP16Model, self).__init__()
self.network = convert_network(network, dtype=torch.half)
def forward(self, *inputs):
inputs = tuple(t.half() for t in inputs)
return self.network(*inputs)
def backwards_debug_hook(grad):
    raise RuntimeError("master_params received a gradient in the backward pass!")
def prep_param_lists(model, flat_master=False):
"""
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
"""
model_params = [param for param in model.parameters() if param.requires_grad]
if flat_master:
# Give the user some more useful error messages
try:
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
except:
            print("Error in prep_param_lists: model may contain a mixture of parameters "
                  "of different types. Use flat_master=False, or use FP16_Optimizer.")
raise
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return model_params, [master_params]
else:
master_params = [param.clone().float().detach() for param in model_params]
for param in master_params:
param.requires_grad = True
return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
"""
Copy model gradients to master gradients.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
"""
if flat_master:
# The flattening may incur one more deep copy than is necessary.
master_params[0].grad.data.copy_(
_flatten_dense_tensors([p.grad.data for p in model_params]))
else:
for model, master in zip(model_params, master_params):
if model.grad is not None:
if master.grad is None:
master.grad = Variable(master.data.new(*master.data.size()))
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
def master_params_to_model_params(model_params, master_params, flat_master=False):
"""
Copy master parameters to model parameters.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
"""
if flat_master:
for model, master in zip(model_params,
_unflatten_dense_tensors(master_params[0].data, model_params)):
model.data.copy_(master)
else:
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
# Backward compatibility fixes
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
clip_grad_norm = torch.nn.utils.clip_grad_norm_
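# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original apex
# file): the classic "fp32 master weights" update loop built from the helpers
# above. It assumes a CUDA device; the tiny Linear model and the SGD settings
# are arbitrary example values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    if torch.cuda.is_available():
        model = torch.nn.Linear(16, 4).cuda().half()
        model_params, master_params = prep_param_lists(model)
        optimizer = torch.optim.SGD(master_params, lr=1e-2)
        x = torch.randn(8, 16, device="cuda", dtype=torch.half)
        loss = model(x).float().sum()
        loss.backward()                                        # fp16 grads land on model_params
        model_grads_to_master_grads(model_params, master_params)
        # (with loss scaling, the master grads would be unscaled here)
        optimizer.step()                                       # update the fp32 master copies
        master_params_to_model_params(model_params, master_params)
        print("updated fp16 weight norm:", model.weight.float().norm().item())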
| GeneSplice-main | GeneSplice/apex/apex/fp16_utils/fp16util.py |
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from ..amp._amp_state import _amp_state, maybe_print
from ..amp.scaler import LossScaler
from ..multi_tensor_apply import multi_tensor_applier
from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm
# TODO: Update overflow check + downscale to use Carl's fused kernel.
class FP16_Optimizer(object):
def __init__(self,
init_optimizer,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True):
print("Warning: FP16_Optimizer is deprecated and dangerous, and will be deleted soon. "
"If it still works, you're probably getting lucky. "
"For mixed precision, use the documented API https://nvidia.github.io/apex/amp.html, with opt_level=O1.")
from apex import deprecated_warning
deprecated_warning("apex.fp16_utils is deprecated and will be removed by the end of February 2023. Use [PyTorch AMP](https://pytorch.org/docs/stable/amp.html)")
        if not torch.cuda.is_available():
raise SystemError("Cannot use fp16 without CUDA.")
self.verbose = verbose
self.optimizer = init_optimizer
# init_state_dict sets up an alternative way to cast per-param state tensors.
# Stashing here in case https://github.com/pytorch/pytorch/issues/7733 makes it necessary.
# init_state_dict = init_optimizer.state_dict()
self.fp16_groups = []
self.fp32_from_fp16_groups = []
self.fp32_from_fp32_groups = []
for i, param_group in enumerate(self.optimizer.param_groups):
self.maybe_print("FP16_Optimizer processing param group {}:".format(i))
fp16_params_this_group = []
fp32_params_this_group = []
fp32_from_fp16_params_this_group = []
for i, param in enumerate(param_group['params']):
if param.requires_grad:
if param.type() == 'torch.cuda.HalfTensor':
self.maybe_print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
.format(param.size()))
fp16_params_this_group.append(param)
master_param = param.detach().clone().float()
master_param.requires_grad = True
param_group['params'][i] = master_param
fp32_from_fp16_params_this_group.append(master_param)
# Reset existing state dict key to the new master param.
# We still need to recast per-param state tensors, if any, to FP32.
if param in self.optimizer.state:
self.optimizer.state[master_param] = self.optimizer.state.pop(param)
elif param.type() == 'torch.cuda.FloatTensor':
self.maybe_print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
.format(param.size()))
fp32_params_this_group.append(param)
param_group['params'][i] = param
else:
raise TypeError("Wrapped parameters must be either "
"torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
"Received {}".format(param.type()))
self.fp16_groups.append(fp16_params_this_group)
self.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group)
self.fp32_from_fp32_groups.append(fp32_params_this_group)
self.all_fp16_params = []
for group in self.fp16_groups:
self.all_fp16_params += group
self.all_fp32_from_fp16_params = []
for group in self.fp32_from_fp16_groups:
self.all_fp32_from_fp16_params += group
self.all_fp32_from_fp32_params = []
for group in self.fp32_from_fp32_groups:
self.all_fp32_from_fp32_params += group
# Leverage state_dict() and load_state_dict() to recast preexisting per-param state tensors
self.optimizer.load_state_dict(self.optimizer.state_dict())
# alternative way to cast per-param state tensors:
# self.optimizer.load_state_dict(init_state_dict)
if dynamic_loss_scale:
self.dynamic_loss_scale = True
if dynamic_loss_args is not None:
self.loss_scaler = LossScaler("dynamic", **dynamic_loss_args)
else:
self.loss_scaler = LossScaler("dynamic")
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(static_loss_scale)
self.overflow = False
self.first_closure_call_this_step = True
self.clip_grad_norm = clip_grad_norm
# TODO: Centralize exposure and import error checking for the C backend.
if multi_tensor_applier.available:
import amp_C
self.multi_tensor_scale = amp_C.multi_tensor_scale
            self._dummy_overflow_buf = torch.cuda.IntTensor([0])
# Having self.maybe_print distinct from _amp_state.maybe_print is another artifact
# of having to support FP16_Optimizer separately, for the time being.
def maybe_print(self, msg):
if self.verbose:
print(msg)
def __getstate__(self):
raise RuntimeError("FP16_Optimizer should be serialized using state_dict().")
def __setstate__(self, state):
raise RuntimeError("FP16_Optimizer should be deserialized using load_state_dict().")
def zero_grad(self, set_grads_to_None=False):
"""
Zero fp32 and fp16 parameter grads.
"""
# In principle, only the .grad attributes of the model params need to be zeroed,
# because gradients are copied into the FP32 master params. However, we zero
# all gradients owned by the optimizer, just to be safe:
for group in self.optimizer.param_groups:
for p in group['params']:
if set_grads_to_None:
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
# Zero fp16 gradients owned by the model:
for fp16_group in self.fp16_groups:
for param in fp16_group:
if set_grads_to_None:
param.grad = None
else:
if param.grad is not None:
param.grad.detach_() # as in torch.optim.optimizer.zero_grad()
param.grad.zero_()
# Should not be used anymore.
# def _check_overflow(self):
# params = []
# for group in self.fp16_groups:
# for param in group:
# params.append(param)
# for group in self.fp32_from_fp32_groups:
# for param in group:
# params.append(param)
# self.overflow = self.loss_scaler.has_overflow(params)
# def _update_scale(self, has_overflow=False):
# self.loss_scaler.update_scale(has_overflow)
def _master_params_to_model_params(self):
if multi_tensor_applier.available:
if len(self.all_fp16_params) > 0:
multi_tensor_applier(
self.multi_tensor_scale,
self._dummy_overflow_buf,
[self.all_fp32_from_fp16_params, self.all_fp16_params],
1.0)
else:
for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
master_params_to_model_params(fp16_group, fp32_from_fp16_group)
# To consider: Integrate distributed with this wrapper by registering a hook on each variable
# that does the overflow check, gradient copy + downscale, and fp32 allreduce in a different stream.
# def _model_grads_to_master_grads(self):
# for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
# model_grads_to_master_grads(fp16_group, fp32_from_fp16_group)
# def _downscale_master(self):
# if self.loss_scale != 1.0:
# for group in self.optimizer.param_groups:
# for param in group['params']:
# if param.grad is not None:
# param.grad.data.mul_(1./self.loss_scale)
def clip_master_grads(self, max_norm, norm_type=2):
"""
Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the current fp32 gradients (viewed as a single vector).
.. warning::
Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
"""
if not self.overflow:
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
return self.clip_grad_norm(fp32_params, max_norm, norm_type)
else:
return -1
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
def step(self, closure=None): # could add clip option.
"""
If no closure is supplied, :attr:`step` should be called after
``fp16_optimizer_obj.backward(loss)``.
:attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to
:class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params
originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, :attr:`step` may be called without a prior call to
:attr:`backward(loss)`.
This control flow is identical to `ordinary Pytorch optimizer use`_ with closures.
However, the user should take care that any ``loss.backward()`` call within the closure
has been replaced by ``fp16_optimizer_obj.backward(loss)``.
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss.
Example with closure::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# loss.backward() becomes:
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. warning::
Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling.
.. _`ordinary Pytorch optimizer use`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
"""
scale = self.loss_scaler.loss_scale()
# To consider: Should this be in step(), or update_master_grads? It works either way,
# but I should make it consistent with the Amp control flow, which updates the scale
# during backward context manager exit.
# self._update_scale(self.overflow)
if self.overflow:
# Using _amp_state.maybe_print instead of self.print here is intentional.
maybe_print("Gradient overflow. Skipping step, reducing " +
"loss scale to {}".format(self.loss_scaler.loss_scale()))
return
if closure is not None:
retval = self._step_with_closure(closure)
else:
# torch.cuda.nvtx.range_push("pytorch optimizer step")
retval = self.optimizer.step()
# torch.cuda.nvtx.range_pop()
self._master_params_to_model_params()
return retval
def _step_with_closure(self, closure):
def wrapped_closure():
# helpful for debugging
# print("Calling wrapped_closure, first_closure_call_this_step = {}"
# .format(self.first_closure_call_this_step))
if self.first_closure_call_this_step:
# We expect that the fp16 params are initially fresh on entering self.step(),
# so _master_params_to_model_params() is unnecessary the first time wrapped_closure()
# is called within self.optimizer.step().
self.first_closure_call_this_step = False
else:
# If self.optimizer.step() internally calls wrapped_closure more than once,
# it may update the fp32 params after each call. However, self.optimizer
# doesn't know about the fp16 params at all. If the fp32 params get updated,
# we can't rely on self.optimizer to refresh the fp16 params. We need
# to handle that manually:
self._master_params_to_model_params()
# Our API expects the user to give us ownership of the backward() call by
# replacing all calls to loss.backward() with optimizer.backward(loss).
# This requirement holds whether or not the call to backward() is made within a closure.
# If the user is properly calling optimizer.backward(loss) within "closure,"
# calling closure() here will give the fp32 master params fresh gradients
# for the optimizer to play with, so all wrapped_closure needs to do is call
# closure() and return the loss.
temp_loss = closure()
            while self.overflow:
scale = self.loss_scaler.loss_scale()
# self._update_scale(self.overflow) # now done at the end of backward
print("OVERFLOW within closure! Skipping step, reducing loss scale to {}".format(
self.loss_scaler.loss_scale()))
temp_loss = closure()
return temp_loss
retval = self.optimizer.step(wrapped_closure)
self.first_closure_call_this_step = True
return retval
def backward(self, loss, update_master_grads=True, retain_graph=False):
"""
:attr:`backward` performs the following conceptual steps:
1. fp32_loss = loss.float() (see first Note below)
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined).
4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32.
5. Finally, master grads are divided by loss_scale.
In this way, after :attr:`backward`, the master params have fresh gradients,
and :attr:`step` may be called.
.. note::
:attr:`backward` internally converts the loss to fp32 before applying the loss scale.
This provides some additional safety against overflow if the user has supplied an
fp16 loss value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
:attr:`backward`.
.. warning::
The gradients found in a model's leaves after the call to
:attr:`backward` should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may change over time).
If the user wants to inspect gradients after a call to :attr:`backward`,
only the master gradients should be regarded as valid. These can be retrieved via
:attr:`inspect_master_grad_data()`.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`.
retain_graph (bool, optional, default=False): Forwards the usual ``retain_graph=True`` option to the internal call to ``loss.backward``. If ``retain_graph`` is being used to accumulate gradient values from multiple backward passes before calling ``optimizer.step``, passing ``update_master_grads=False`` is also recommended (see Example below).
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_master_grads=False)
optimizer.backward(loss2, update_master_grads=False)
optimizer.update_master_grads()
"""
# To consider: try multiple backward passes using retain_grad=True to find
# a loss scale that works. After you find a loss scale that works, do a final dummy
# backward pass with retain_graph=False to tear down the graph. Doing this would avoid
# discarding the iteration, but probably wouldn't improve overall efficiency.
scaled_loss = loss.float()*self.loss_scaler.loss_scale()
scaled_loss.backward(retain_graph=retain_graph)
if update_master_grads:
self.update_master_grads()
def update_master_grads(self):
# torch.cuda.nvtx.range_push("update_master_grads")
"""
Copy the ``.grad`` attribute from stored references to fp16 parameters to
the ``.grad`` attribute of the fp32 master parameters that are directly
updated by the optimizer. :attr:`update_master_grads` only needs to be called if
``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
"""
# if self.dynamic_loss_scale:
# self._check_overflow()
# if self.overflow: return
# self._model_grads_to_master_grads()
# self._downscale_master()
# Use the one-shot multi-tensor apply kernel
self.loss_scaler.clear_overflow_state()
if len(self.all_fp16_params) > 0:
# print("Model grads before")
# print([param.grad.data for param in self.all_fp16_params])
# I'm ONLY writing this as an incremental way to make some tests pass until
# I can refactor the tests as well.
# FP16_Optimizer should not be used by anyone.
model_grads = []
master_grads = []
for model_param, master_param in zip(self.all_fp16_params,
self.all_fp32_from_fp16_params):
if model_param.grad is not None:
model_grads.append(model_param.grad)
if master_param.grad is None:
master_param.grad = torch.empty_like(master_param)
master_grads.append(master_param.grad)
self.loss_scaler.unscale(
model_grads,
master_grads,
self.loss_scaler.loss_scale())
# print("Master grads after")
# print([param.grad.data for param in self.all_fp32_from_fp16_params])
if len(self.all_fp32_from_fp32_params) > 0:
model_grads = []
master_grads = []
for model_param, master_param in zip(self.all_fp32_from_fp32_params,
self.all_fp32_from_fp32_params):
if model_param.grad is not None:
model_grads.append(model_param.grad)
master_grads.append(master_param.grad)
# print("Model grads before")
# print([param.grad.data for param in self.all_fp32_from_fp32_params])
self.loss_scaler.unscale(
model_grads,
master_grads,
self.loss_scaler.loss_scale())
# print("Master grads after")
# print([param.grad.data for param in self.all_fp32_from_fp32_params])
# quit()
self.overflow = self.loss_scaler.update_scale()
# torch.cuda.nvtx.range_pop()
def inspect_master_grad_data(self):
"""
When running with :class:`FP16_Optimizer`,
``.grad`` attributes of a model's fp16 leaves should not be
regarded as truthful, because they might be scaled.
After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
the fp32 master params' ``.grad``
attributes will contain valid gradients properly divided by the loss scale. However,
because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
nonintuitive. :attr:`inspect_master_grad_data`
allows those gradients to be viewed with shapes corresponding to their associated model leaves.
Returns:
List of lists (one list for each parameter group). The list for each parameter group
is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
"""
if self.overflow:
print("Warning: calling FP16_Optimizer.inspect_master_grad_data while in an overflow state. "
"Gradients are currently invalid (may be inf, nan, or stale). Returning None.")
return None
else:
# The optimizer owns only references to master params.
master_grads_data = []
for param_group in self.optimizer.param_groups:
master_grads_this_group = []
for param in param_group['params']:
if param.grad is not None:
master_grads_this_group.append(param.grad.data)
else:
master_grads_this_group.append(None)
master_grads_data.append(master_grads_this_group)
return master_grads_data
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
return self.loss_scaler.loss_scale()
def _set_loss_scale(self, value):
self.loss_scaler._loss_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
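# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original apex
# file), assembled from the docstrings above. FP16_Optimizer needs CUDA and
# the amp_C extension, so the loop is kept as a comment; `loader` and
# `loss_fn` are placeholders.
#
#     model = torch.nn.Linear(D_in, D_out).cuda().half()
#     optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
#     optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
#     for input, target in loader:
#         optimizer.zero_grad()
#         loss = loss_fn(model(input), target)
#         optimizer.backward(loss)          # replaces loss.backward()
#         optimizer.clip_master_grads(1.0)  # optional; returns -1 if grads overflowed
#         optimizer.step()                  # internally skipped on overflow
# ---------------------------------------------------------------------------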
| GeneSplice-main | GeneSplice/apex/apex/fp16_utils/fp16_optimizer.py |
import torch
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
class LossScaler:
"""
Class that manages a static loss scale. This class is intended to interact with
:class:`FP16_Optimizer`, and should not be directly manipulated by the user.
Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to
:class:`FP16_Optimizer`'s constructor.
Args:
scale (float, optional, default=1.0): The loss scale.
"""
def __init__(self, scale=1):
from apex import deprecated_warning
deprecated_warning("apex.fp16_utils is deprecated and will be removed by the end of February 2023. Use [PyTorch AMP](https://pytorch.org/docs/stable/amp.html)")
self.cur_scale = scale
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
return False
def update_scale(self, overflow):
pass
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss, retain_graph=False):
scaled_loss = loss*self.loss_scale
scaled_loss.backward(retain_graph=retain_graph)
class DynamicLossScaler:
"""
Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler`
indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of
:class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler`
operates, because the default options can be changed using the
    ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor.
Loss scaling is designed to combat the problem of underflowing gradients encountered at long
times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss
scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are
encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has
occurred.
:class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch,
and :class:`DynamicLossScaler` adjusts the loss scale to a lower value.
If a certain number of iterations occur without overflowing gradients detected,
:class:`DynamicLossScaler` increases the loss scale once more.
In this way :class:`DynamicLossScaler` attempts to "ride the edge" of
always using the highest loss scale possible without incurring overflow.
Args:
init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.`
scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``.
scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale.
"""
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
for p in params:
if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
return True
return False
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
# `overflow` is boolean indicating whether the gradient overflowed
def update_scale(self, overflow):
if overflow:
# self.cur_scale /= self.scale_factor
self.cur_scale = max(self.cur_scale/self.scale_factor, 1)
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
self.cur_iter += 1
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss, retain_graph=False):
scaled_loss = loss*self.loss_scale
scaled_loss.backward(retain_graph=retain_graph)
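# Editor's sketch (assumption: standalone illustration, not part of the original file):
# how DynamicLossScaler adjusts its scale -- halved (with a floor of 1) on overflow,
# multiplied by scale_factor after scale_window consecutive overflow-free iterations.
"""
scaler = DynamicLossScaler(init_scale=2**16, scale_factor=2., scale_window=4)
for step in range(10):
    overflow = (step == 3)              # pretend the gradients overflowed at step 3
    scaler.update_scale(overflow)
    print(step, scaler.loss_scale)      # drops to 2**15 at step 3, back to 2**16 at step 7
"""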
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
"""
TO-DO separate out into an example.
if __name__ == "__main__":
import torch
from torch.autograd import Variable
from dynamic_loss_scaler import DynamicLossScaler
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
parameters = [w1, w2]
learning_rate = 1e-6
optimizer = torch.optim.SGD(parameters, lr=learning_rate)
loss_scaler = DynamicLossScaler()
for t in range(500):
y_pred = x.mm(w1).clamp(min=0).mm(w2)
loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
print('Iter {} scaled loss: {}'.format(t, loss.data[0]))
print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale))
# Run backprop
optimizer.zero_grad()
loss.backward()
# Check for overflow
has_overflow = DynamicLossScaler.has_overflow(parameters)
# If no overflow, unscale grad and update as usual
if not has_overflow:
for param in parameters:
param.grad.data.mul_(1. / loss_scaler.loss_scale)
optimizer.step()
# Otherwise, don't do anything -- ie, skip iteration
else:
print('OVERFLOW!')
# Update loss scale for next iteration
loss_scaler.update_scale(has_overflow)
"""
| GeneSplice-main | GeneSplice/apex/apex/fp16_utils/loss_scaler.py |
from .multi_tensor_apply import MultiTensorApply
multi_tensor_applier = MultiTensorApply(2048*32)
| GeneSplice-main | GeneSplice/apex/apex/multi_tensor_apply/__init__.py |
import torch
class MultiTensorApply(object):
available = False
warned = False
def __init__(self, chunk_size):
try:
import amp_C
MultiTensorApply.available = True
self.chunk_size = chunk_size
except ImportError as err:
MultiTensorApply.available = False
MultiTensorApply.import_err = err
def check_avail(self):
if MultiTensorApply.available == False:
raise RuntimeError(
"Attempted to call MultiTensorApply method, but MultiTensorApply "
"is not available, possibly because Apex was installed without "
"--cpp_ext --cuda_ext. Original import error message:",
MultiTensorApply.import_err)
def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
self.check_avail()
return op(self.chunk_size,
noop_flag_buffer,
tensor_lists,
*args)
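# Editor's sketch (not part of the original file): the typical call pattern, assuming the
# amp_C extension was built (`--cpp_ext --cuda_ext`); the choice of op (multi_tensor_scale)
# and the argument layout here are illustrative assumptions.
"""
import torch
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier

overflow_buf = torch.zeros(1, dtype=torch.int, device='cuda')   # set non-zero by ops that detect inf/nan
src = [torch.randn(1024, device='cuda') for _ in range(4)]
dst = [torch.empty_like(t) for t in src]
# one fused kernel launch processes chunks of `chunk_size` elements across all listed tensors
multi_tensor_applier(amp_C.multi_tensor_scale, overflow_buf, [src, dst], 1.0 / 128.0)
"""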
| GeneSplice-main | GeneSplice/apex/apex/multi_tensor_apply/multi_tensor_apply.py |
| GeneSplice-main | GeneSplice/apex/apex/contrib/__init__.py |
import torch
import fused_index_mul_2d
class IndexMul2d_(torch.autograd.Function):
'''
Index in1 along dimension 0 with idx1 and multiply the result element-wise by in2.
Currently only indexing along dimension 0 of a 2-dimensional tensor is supported.
The shape of in1[idx1] must match the shape of in2; this kernel does not support broadcasting.
The dtype must be float32 or float16.
'''
@staticmethod
def forward(ctx, in1: torch.Tensor, in2: torch.Tensor, idx1: torch.Tensor) -> torch.Tensor:
assert in2.size(0) == idx1.size(0)
if ((in1.dtype != torch.float32 and in1.dtype != torch.half) or in2.dtype != in1.dtype):
raise RuntimeError("input1'dtype and input2's dtype must be fp32 or fp16. And input type must be same")
if (in1.dim() != 2 or in2.dim() != 2):
raise RuntimeError("in1 and in2 must be 2-dimension tensor.")
if (idx1.dim() != 1):
raise RuntimeError("idx1 must be 1-dimension tensor.")
if not in1.is_contiguous():
in1 = in1.contiguous()
if not in2.is_contiguous():
in2 = in2.contiguous()
if not idx1.is_contiguous():
idx1 = idx1.contiguous()
assert in1.is_contiguous()
assert in2.is_contiguous()
assert idx1.is_contiguous()
out = torch.empty_like(in2)
if (in1.dtype == torch.float32):
fused_index_mul_2d.float_forward(
out,
in1,
in2,
idx1)
elif (in1.dtype == torch.half):
fused_index_mul_2d.half_forward(
out,
in1,
in2,
idx1)
ctx.for_backwards = (in1, in2, idx1)
return out
@staticmethod
def backward(ctx, grad_out):
in1, in2, idx1 = ctx.for_backwards
grad_in1, grad_in2 = index_mul_2d_backward(in1, in2, idx1, grad_out)
return grad_in1, grad_in2, None
class IndexMul2dBackward_(torch.autograd.Function):
@staticmethod
def forward(ctx, in1: torch.Tensor, in2: torch.Tensor, idx1: torch.Tensor,
grad_out: torch.Tensor) -> torch.Tensor:
if not in1.is_contiguous():
in1 = in1.contiguous()
if not in2.is_contiguous():
in2 = in2.contiguous()
if not idx1.is_contiguous():
idx1 = idx1.contiguous()
if not grad_out.is_contiguous():
grad_out = grad_out.contiguous()
assert in1.is_contiguous()
assert in2.is_contiguous()
assert idx1.is_contiguous()
assert grad_out.is_contiguous()
grad_in1 = torch.zeros_like(in1)
grad_in2 = torch.empty_like(in2)
if (in1.dtype == torch.float32):
fused_index_mul_2d.float_backward(
grad_in1,
grad_in2,
grad_out,
in1,
in2,
idx1)
elif (in1.dtype == torch.half):
fused_index_mul_2d.half_backward(
grad_in1,
grad_in2,
grad_out,
in1,
in2,
idx1)
ctx.for_backwards = (in1, in2, idx1, grad_out)
return grad_in1, grad_in2
@staticmethod
def backward(ctx, grad_grad_in1, grad_grad_in2):
if not grad_grad_in1.is_contiguous():
grad_grad_in1 = grad_grad_in1.contiguous()
if not grad_grad_in2.is_contiguous():
grad_grad_in2 = grad_grad_in2.contiguous()
assert grad_grad_in1.is_contiguous()
assert grad_grad_in2.is_contiguous()
in1, in2, idx1, grad_out = ctx.for_backwards
grad_in1 = torch.zeros_like(in1)
grad_in2 = torch.empty_like(in2)
grad_grad_out = torch.empty_like(grad_out)
if (in1.dtype == torch.float32):
fused_index_mul_2d.float_backward_backward(
grad_grad_out,
grad_in1,
grad_in2,
grad_out,
grad_grad_in1,
grad_grad_in2,
in1,
in2,
idx1)
elif (in1.dtype == torch.half):
fused_index_mul_2d.half_backward_backward(
grad_grad_out,
grad_in1,
grad_in2,
grad_out,
grad_grad_in1,
grad_grad_in2,
in1,
in2,
idx1)
return grad_in1, grad_in2, None, grad_grad_out
index_mul_2d = IndexMul2d_.apply
index_mul_2d_backward = IndexMul2dBackward_.apply
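# Editor's sketch (not part of the original file): assuming CUDA and the fused_index_mul_2d
# extension are available, index_mul_2d(in1, in2, idx1) is the fused equivalent of
# in1[idx1] * in2, with gradients flowing back to both inputs.
"""
import torch
in1 = torch.randn(100, 64, device='cuda', requires_grad=True)   # table indexed along dim 0
in2 = torch.randn(32, 64, device='cuda', requires_grad=True)    # same shape as in1[idx1]
idx1 = torch.randint(0, 100, (32,), device='cuda')
out = index_mul_2d(in1, in2, idx1)
out.sum().backward()        # grad for in1 is scatter-added at idx1; grad for in2 matches its shape
"""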
| GeneSplice-main | GeneSplice/apex/apex/contrib/index_mul_2d/index_mul_2d.py |
from .index_mul_2d import index_mul_2d
| GeneSplice-main | GeneSplice/apex/apex/contrib/index_mul_2d/__init__.py |
from .sparse_masklib import create_mask
from .asp import ASP
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/__init__.py |
import types
import torch
from .sparse_masklib import create_mask
from .permutation_lib import Permutation
torchvision_imported=True
try:
import torchvision
except ImportError:
print("[ASP][Warning] torchvision cannot be imported.")
torchvision_imported=False
import json
import os
import string
import time
def eligible_modules(model, whitelist_layer_types, allowed_layer_names, disallowed_layer_names):
eligible_modules_list = []
for name, mod in model.named_modules():
if isinstance(mod, whitelist_layer_types) and name not in disallowed_layer_names:
if allowed_layer_names is not None and name not in allowed_layer_names:
continue
eligible_modules_list.append((name, mod))
return eligible_modules_list
class ASP:
__model = None
__verbosity = 0
__optimizer = None
__sparse_parameters = []
__calculate_mask = None
__allow_permutation = True
__all_parameters = []
__save_permutation_graph = False
__permutation_output_dir = ''
@classmethod
def init_model_for_pruning(cls, model, mask_calculator="m4n2_1d",
verbosity=3,
whitelist=[torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.MultiheadAttention],
allowed_layer_names=None, disallowed_layer_names=[],
allow_recompute_mask=False, custom_layer_dict={},
allow_permutation=True):
"""Call this method to modify your model to take advantage of sparse matrix multiplication.
Note that this call alone only augments the model with additional buffers needed for sparse MMA,
it does not enable use of sparse MMA.
If you are starting with a fresh model:
model = ...
ASP.init_model_for_pruning(model, mask_calculator, ...)
if (training) ASP.init_optimizer_for_pruning(optimizer)
ASP.compute_sparse_masks() // sparsity is off by default, call when you want to enable it.
If you are starting from a checkpoint:
model = ...
ASP.init_model_for_pruning(model, mask_calculator, ...)
torch.load(...)
if (training) ASP.init_optimizer_for_pruning(optimizer)
Arguments:
model The model
mask_calculator Either callable that computes mask given a tensor OR pattern string for sparse mask lib.
verbosity Integer controlling verbosity level.
0 -> Only errors.
1 -> Errors and warnings.
2 -> Errors, warnings and info.
3 -> Errors, warnings, info and debug.
whitelist Module types approved for sparsity.
allowed_layer_names If not None, only layer names that appear in this list are considered for sparsity.
disallowed_layer_names If not [], only layer names that do not appear in this list are considered for sparsity.
allow_recompute_mask If True, stores pruned values so that dense weights can be restored.
Pruned weights are stored in CPU memory, hence this option does not increase GPU memory usage.
custom_layer_dict Dictionary of additional layer parameters to sparsify. e.g. {CustomLinear: ['weight']}
allow_permutation If True, allow the input channel permutation to ease the influence of weight pruning.
[Future] Support for allow_recompute_mask can be removed, it is not part of sparse inference recipe.
"""
assert (cls.__model is None), "ASP has been initialized already."
cls.__model = model
cls.__verbosity = verbosity
cls.__allow_permutation = allow_permutation
if isinstance(mask_calculator, str):
def create_mask_from_pattern(param):
return create_mask(param, mask_calculator).bool()
cls.__calculate_mask = create_mask_from_pattern
else:
cls.__calculate_mask = mask_calculator #user defined function
# function to extract variables that will be sparsified.
# idea is that you will add one of these functions for each module type that can be sparsified.
if torchvision_imported:
print("[ASP] torchvision is imported, can work with the MaskRCNN/KeypointRCNN from torchvision.")
torchvision_version = str(torchvision.__version__)
torchvision_version_major = int(torchvision_version.split('.')[0])
torchvision_version_minor = int(torchvision_version.split('.')[1])
if torchvision_version_major == 0 and torchvision_version_minor < 12:
sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight'], torch.nn.modules.linear.NonDynamicallyQuantizableLinear: ['weight'], torch.nn.MultiheadAttention: ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight'], torchvision.ops.misc.Conv2d: ['weight']}
else: # Torchvision remove APIs that were deprecated before 0.8 (#5386) in 0.12.0, torchvision.ops.misc.Conv2d is removed
sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight'], torch.nn.modules.linear.NonDynamicallyQuantizableLinear: ['weight'], torch.nn.MultiheadAttention: ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']}
else:
sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight'], torch.nn.modules.linear.NonDynamicallyQuantizableLinear: ['weight'], torch.nn.MultiheadAttention: ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']}
if custom_layer_dict: # Update default list to include user supplied custom (layer type : parameter tensor), make sure this tensor type is something ASP knows how to prune
sparse_parameter_list.update(custom_layer_dict)
whitelist += list(custom_layer_dict.keys())
for module_type in whitelist:
assert (module_type in sparse_parameter_list), "Module %s :: Don't know how to sparsify module." % module_type
# find all sparse modules, extract sparse parameters and decorate
def add_sparse_attributes(module_name, module):
sparse_parameters = sparse_parameter_list[type(module)]
for p_name, p in module.named_parameters():
if p_name in sparse_parameters and p.requires_grad:
# check for NVIDIA's TC compatibility: we check along the horizontal direction
if p.dtype == torch.float32 and ((p.size()[0] % 8) != 0 or (p.size()[1] % 16) != 0): #User defines FP32 and APEX internally uses FP16 math
print("[ASP] Auto skipping pruning %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
continue
if p.dtype == torch.float16 and ((p.size()[0] % 8) != 0 or (p.size()[1] % 16) != 0): #For Conv2d dim= K x CRS; we prune along C
print("[ASP] Auto skipping pruning %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
continue
if cls.__verbosity >= 3:
print("[ASP] Sparsifying %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
mask = torch.ones_like(p).bool()
buffname = p_name.split(".")[-1] # buffer names cannot contain "."
module.register_buffer('__%s_mma_mask' % buffname, mask)
if allow_recompute_mask:
pruned = torch.zeros_like(p).cpu()
module.register_buffer('__%s_mma_pruned_p' % buffname, pruned)
else:
pruned = None
cls.__sparse_parameters.append((module_name, module, p_name, p, mask, pruned))
else:
if cls.__verbosity >= 3:
print("[ASP] Not sparsifying %s::%s of size=%s and type=%s" % (module_name, p_name, str(p.size()), str(p.dtype)))
for name, sparse_module in eligible_modules(model, tuple(whitelist), allowed_layer_names, disallowed_layer_names):
add_sparse_attributes(name, sparse_module)
if allow_permutation: # find all named modules, extract parameters and decorate, used for offline permutation in K dim
for module_name, module in model.named_modules():
module_type_str = str(type(module)).split("\'")[1]
if module_type_str == 'torch.nn.modules.container.Sequential' or module_type_str.startswith('torchvision.models'):
# filter out the 'torch.nn.modules.container.Sequential' type and the whole model, like 'torchvision.models.vgg.VGG'
continue
for p_name, p in module.named_parameters():
cls.__all_parameters.append((module_name, module, p_name, p))
if module_type_str == 'torch.nn.modules.batchnorm.BatchNorm2d':
# need to get the running_mean and running_var from model.state_dict(), as they are not the learnable parameters
module_mean_name = module_name + '.running_mean'
module_var_name = module_name + '.running_var'
for param_key in model.state_dict():
if module_mean_name == param_key or module_var_name == param_key:
cls.__all_parameters.append((module_name, module, param_key.split(".")[-1], model.state_dict()[param_key]))
# add the __permutation_output_dir field to save the intermediate results for permutation
cls.__permutation_output_dir = '.'
# Set the corresponding params from ASP class to the Permutation class
permutation_verbosity = 5
Permutation.set_permutation_params_from_asp(cls.__model, cls.__sparse_parameters, cls.__all_parameters, permutation_verbosity)
# Set the identical random seed for all GPUs to make sure the same results generated in permutation search
Permutation.set_identical_seed()
@classmethod
def already_init_asp_model(cls):
"""Call this method to check whether ASP has been initialized already.
"""
if cls.__model is None:
if cls.__verbosity >= 3:
print("[ASP] ASP has not been initialized.")
return False
else:
if cls.__verbosity >= 3:
print("[ASP] ASP has been initialized already.")
return True
@classmethod
def init_optimizer_for_pruning(cls, optimizer):
"""Call this method to monkey patch optimizer step function so that masks can be applied to
gradients and weights during training.
You must call init_model_for_pruning(...) before calling init_optimizer_for_pruning(...)
"""
assert (cls.__optimizer is None), "ASP has initialized optimizer already."
assert (cls.__calculate_mask is not None), "Called ASP.init_optimizer_for_pruning before ASP.init_model_for_pruning."
# store pointer to original optimizer step method
cls.__optimizer = optimizer
cls.__optimizer.__step = optimizer.step
def __step(opt_self, *args, **kwargs):
# prune gradients before step method
with torch.no_grad():
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if p.grad is not None: #thx pjudd
p.grad.mul_(mask)
# call original optimizer step method
rval = opt_self.__step(*args, **kwargs)
# prune parameters after step method
with torch.no_grad():
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
p.mul_(mask)
return rval
cls.__optimizer.step = types.MethodType(__step, cls.__optimizer)
@classmethod
def compute_sparse_masks(cls):
"""Call this method to enable sparsity.
If init(...) was called with allow_recompute_mask=False AND sparsity is disabled, pruned field can be None.
"""
with torch.no_grad():
if cls.__allow_permutation:
# Step 1: use the Torch.FX library to build the graph
# Step 2: permutation search with the customized kernel
# The simplest approach, requiring no user intervention:
# A. try to permute the model assuming it is wrapped for distributed training (model.module)
# B. if that raises an AttributeError, permute the non-distributed model directly
start_time_permute = time.perf_counter()
successful_permutation = False
try:
successful_permutation = Permutation.permute_model(cls.__model.module, dump_fx_graph=cls.__save_permutation_graph, save_dumped_fx_graph=os.path.join(cls.__permutation_output_dir, 'model_offline_permutation_graph.json'))
if successful_permutation:
print("\n[compute_sparse_masks] permuted the (distributed) model.")
except AttributeError:
successful_permutation = Permutation.permute_model(cls.__model, dump_fx_graph=cls.__save_permutation_graph, save_dumped_fx_graph=os.path.join(cls.__permutation_output_dir, 'model_offline_permutation_graph.json'))
if successful_permutation:
print("\n[compute_sparse_masks] permuted the model.")
if successful_permutation:
duration_build_offline_permutation_graph = time.perf_counter() - start_time_permute
print("[compute_sparse_masks] Take {:.4f} seconds to find and apply permutations.".format(duration_build_offline_permutation_graph))
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if mask.sum() < mask.numel(): # when recalculating masks
# restore dense parameter if allow_recompute_mask is enabled
assert (pruned is not None), "Unable to restore dense parameter because allow_recompute_mask == False"
p.add_(pruned.cuda())
mask.set_(cls.__calculate_mask(p))
if pruned is not None: # stow away pruned weights to cpu
pruned.set_((p * (~mask)).cpu())
p.mul_(mask) # in-place multiplication, so pruned weights are 0-values, hence checkpoint will have 0s for pruned weights
if cls.__verbosity >= 2:
print("[ASP] Enabled %.2f%% sparsity for %s::%s of size=%s and type=%s with magnitude %s" % (100.0-100.0*mask.sum()/mask.numel(), module_name, p_name, str(p.size()), str(p.dtype), torch.sum(torch.abs(p))))
@classmethod
def restore_pruned_weights(cls):
"""Call this method to disable sparsity and restore all weights.
This will only work if init(...) was called with allow_recompute=True.
"""
with torch.no_grad():
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if mask.sum() < mask.numel():
assert (pruned is not None), "Unable to restore dense parameter because allow_recompute_mask == False"
p.add_(pruned.cuda())
mask.fill_(1)
pruned.zero_()
if cls.__verbosity >= 2:
print("[ASP] Disabled sparsity for %s::%s (dense weights restored)" % (module_name, p_name))
@classmethod
def is_sparsity_enabled(cls):
"""Call this method to determine if sparsity is enabled in the model.
The typical use case is right after checkpoint has been loaded.
"""
total,sp100,sp50 = 0,0,0
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
total += 1
mask_sum = mask.sum()
mask_numel = mask.numel()
if mask_sum == mask_numel:
sp100 += 1
elif mask_sum*2 == mask_numel:
sp50 += 1
assert (total == sp100 or total == sp50), "Inconsistent model sparsity"
if total == sp100:
return False
elif total == sp50:
return True
@classmethod
def prune_trained_model(cls, model, optimizer):
# add mask buffers to model (init_model_for_pruning), augment optimizer (init_optimizer_for_pruning) and compute masks (compute_sparse_masks)
cls.init_model_for_pruning(model, mask_calculator="m4n2_1d", verbosity=2, whitelist=[torch.nn.Linear, torch.nn.Conv2d, torch.nn.MultiheadAttention], allow_recompute_mask=False)
cls.init_optimizer_for_pruning(optimizer)
cls.compute_sparse_masks()
@classmethod
def set_permutation_saving_params(cls, allow_permutation=True, save_permutation_graph=False, permutation_output_dir='.'):
"""This function is used to set the permutation saving related parameters in ASP class and inside of the Permutation class."""
print("\n[ASP][set_permutation_saving_param] Set permutation saving related parameters")
print("\n[set_permutation_saving_param] Set permutation saving related parameters")
cls.__allow_permutation = allow_permutation
print("[set_permutation_saving_param]\t Allow permutation: {}".format(cls.__allow_permutation))
cls.__save_permutation_graph = save_permutation_graph
print("[set_permutation_saving_param]\t Save permutation graphs: {}".format(cls.__save_permutation_graph))
cls.__permutation_output_dir = permutation_output_dir
print("[set_permutation_saving_param]\t Permutation graphs saving dir: {}".format(cls.__permutation_output_dir))
Permutation.set_permutation_saving_params(allow_permutation, save_permutation_graph, permutation_output_dir)
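# Editor's sketch (not part of the original file): the fine-tuning recipe implied by
# prune_trained_model() above; `model`, `optimizer` and `train_loop` are placeholders.
"""
from apex.contrib.sparsity import ASP

ASP.prune_trained_model(model, optimizer)   # register masks, patch optimizer.step, compute 2:4 masks
train_loop(model, optimizer)                # fine-tune with sparsity enforced on every step
torch.save(model.state_dict(), 'sparse_checkpoint.pt')   # pruned weights are saved as zeros
"""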
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/asp.py |
import os
import torch
import json
import string
import time
import numpy as np
import sys
import builtins as __builtin__
import io
try:
from .permutation_search_kernels import accelerated_search_for_good_permutation, sum_after_2_to_4
print("[ASP][Info] permutation_search_kernels can be imported.")
except ImportError:
print("[ASP][Warning] permutation_search_kernels cannot be imported.")
print("[ASP][Warning] If you want to accelerate the permutation search process by GPU, please build APEX by following the instructions at https://github.com/NVIDIA/apex/blob/master/apex/contrib/sparsity/README.md")
def convert_fx_node_name(fx_node_name):
"""Standardize punctuation of a node's name: replace all '_' with '.'"""
return fx_node_name.replace('_', '.')
def get_node_parent_children(fx_node):
"""Populate lists of all direct parents and children of a node"""
# get node parent list, and convert node name to module name
node_parent_name_converted = []
if len(fx_node.all_input_nodes) > 0:
node_parent = fx_node.all_input_nodes
for item in node_parent:
converted_item = convert_fx_node_name(item.name)
node_parent_name_converted.append(converted_item)
else:
node_parent = []
# get node children list, and convert node name to module name
node_children_name_converted = []
if len(list(fx_node.users.keys())) > 0:
node_children = list(fx_node.users.keys())
for item in node_children:
converted_item = convert_fx_node_name(item.name)
node_children_name_converted.append(converted_item)
else:
node_children = []
return node_parent_name_converted, node_children_name_converted
def node_name_matches(node_name, module_name):
"""Check for a match between graph node name and stored module name, accounting for formatting and DDP training differences"""
# process: remove all punctuation, everything to lower case
def process(name):
return ''.join(c for c in name if c not in string.punctuation).lower()
processed_node_name = process(node_name)
processed_module_name = process(module_name)
# module names start with 'module.' in distributed data-parallel training, but fx graph node names don't; check for both
distributed_node_name = 'module.' + node_name
distributed_processed_node_name = 'module' + processed_node_name
return (node_name == module_name) or (distributed_node_name == module_name) or (processed_node_name == processed_module_name) or (distributed_processed_node_name == processed_module_name)
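# Editor's illustration (not part of the original file): with the processing above,
#   node_name_matches('features.0.conv', 'module.features.0.conv')  -> True   (DDP 'module.' prefix)
#   node_name_matches('features_0_conv', 'features.0.conv')         -> True   (punctuation-insensitive)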
def replicate_sequence(sequence, replications):
"""Replicate a permutation to apply it to an even multiple of channel counts"""
replicated_sequence = []
for rep in range(replications):
offset = len(sequence) * rep
for c in sequence:
replicated_sequence.append(c+offset)
return replicated_sequence
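# Editor's illustration (not part of the original file):
#   replicate_sequence([1, 0, 2], 2) == [1, 0, 2, 4, 3, 5]
# i.e. the same within-block permutation repeated, offset by len(sequence) per block.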
class Permutation:
__model = None
__sparse_parameters = []
__allow_permutation = False
__all_parameters = []
__verbosity = 0 ## 0: errors only, 1: also high-level details, warnings, 2: also intermediate steps, 3: everything
__params_permuted_in_C = []
__params_permuted_in_K = []
__unpermuted_dims = []
__save_permutation_graph = False
__permutation_output_dir = ''
__manual_seed = None
__tcpstore_port = 2341
# these module types may be the target of permutations (have potentially sparse weights or are attributes with no parents)
__permutation_target_module_types = ['torch.nn.modules.conv.Conv1d',
'torch.nn.modules.conv.Conv2d',
'torch.nn.modules.linear.Linear',
'torch.nn.modules.linear.LazyLinear',
'torch.nn.modules.linear.NonDynamicallyQuantizableLinear',
'torch.nn.modules.activation.MultiheadAttention',
'get_attr']
# these module types are not permuted, but must pass any permutation seen by a child's C or passed-thru K to the parents' K
__simple_passthru_module_types = ['torch.nn.modules.activation.ReLU6',
'torch.nn.modules.activation.ReLU',
'torch.nn.modules.dropout.Dropout',
'torch.nn.modules.dropout.Dropout1d',
'torch.nn.modules.dropout.Dropout2d',
'torch.nn.modules.dropout.Dropout3d',
'torch.nn.modules.dropout.AlphaDropout',
'torch.nn.modules.dropout.FeatureAlphaDropout',
'torch.nn.modules.pooling.MaxPool2d',
'torch.nn.modules.pooling.AdaptiveAvgPool2d',
'torch.nn.modules.pooling.AvgPool2d',
'torch.nn.modules.activation.Hardsigmoid',
'torch.nn.modules.activation.Hardswish',
'torch.nn.modules.activation.GELU',
'torch.nn.modules.normalization.LocalResponseNorm',
'torch.nn.modules.activation.Softmin',
'torch.nn.modules.activation.Softmax',
'torch.nn.modules.activation.Softmax2d',
'torch.nn.modules.activation.LogSoftmax',
'torch.nn.modules.activation.AdaptiveLogSoftmaxWithLoss',
'torch.nn.modules.activation.SiLU',
'torch.nn.modules.activation.Sigmoid',
'concat',
'torch.nn.modules.flatten.Flatten' # if it's a problem, it'll be handled via dimension mismatch check
]
# these module types have parameters that must be permuted along K as well as need to pass the permutation thru to parents' K
__permute_K_and_passthru_module_types = ['torch.nn.modules.batchnorm.BatchNorm2d',
'torch.nn.modules.normalization.LayerNorm',
'torch.nn.modules.instancenorm.InstanceNorm2d',
'torch.nn.modules.batchnorm.SyncBatchNorm']
# these module types cannot be permuted safely (today), and cause neighboring layers to have permutations disabled
__disallow_permutations_module_types = ['torch.nn.modules.normalization.GroupNorm', # to handle: influence GCD of real children's sibling group
'torch.nn.modules.linear.Bilinear', # need to permute one input along in1_features and the other along in2_features
'torch.nn.modules.activation.GLU', # may work OOTB, but might need to explicitly handle dimensionality change
]
@classmethod
def set_identical_seed(cls, identical_seed=1):
"""Make all GPUs in DDP use the same seed to find identical permutations and not require syncing parameters later"""
if cls.__verbosity > 0:
print("[set_identical_seed] Set the identical seed: {:} for all GPUs to make sure the same results generated in permutation search".format(identical_seed))
cls.__manual_seed = identical_seed
cls.reset_seed()
@classmethod
def reset_seed(cls):
"""To find the same permutations no matter how many GPUs are used, we reset the seed before every search"""
identical_seed = cls.__manual_seed
assert identical_seed is not None, "Must call set_identical_seed() before it can be reset"
torch.manual_seed(identical_seed)
torch.cuda.manual_seed(identical_seed)
import random
np.random.seed(identical_seed)
random.seed(identical_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@classmethod
def set_tcpstore_port(cls, tcpstore_port):
"""Override the default port if it is in use in a distributed training session"""
cls.__tcpstore_port = tcpstore_port
if cls.__verbosity > 0:
print(f"[set_tcpstore_port] TCPStore port set to {cls.__tcpstore_port} .")
@classmethod
def set_permutation_saving_params(cls, allow_permutation=False, save_permutation_graph=False, permutation_output_dir='.'):
"""This function is used to set the permutation saving related parameters."""
cls.__allow_permutation = allow_permutation
cls.__save_permutation_graph = save_permutation_graph
cls.__permutation_output_dir = permutation_output_dir
if cls.__verbosity > 0:
print(f"[permutation_lib][set_permutation_saving_param] Set permutation saving related parameters\n\tAllow permutation: {cls.__alow_permutation}\n\tSave permutation graphs: {cls.__save_permutation_graph}\n\tPermutation graphs saving dir: {cls.__permutation_output_dir}")
@classmethod
def set_permutation_params_from_asp(cls, model, sparse_parameters, all_parameters, verbosity):
"""This function is used to set the permutation needed parameters from ASP class."""
cls.__verbosity = verbosity
if cls.__verbosity > 0:
print("[set_permutation_params_from_asp] Set permutation needed parameters")
cls.__model = model
cls.__sparse_parameters = sparse_parameters
cls.__all_parameters = all_parameters
if cls.__verbosity > 1:
sparse_param_names = [module_name+":"+p_name for (module_name, module, p_name, p, mask, pruned) in cls.__sparse_parameters]
all_param_names = [module_name+":"+p_name for (module_name, module, p_name, p) in cls.__all_parameters]
print(f"\tSparse parameter names: {sparse_param_names}\n\tAll parameter names: {all_param_names}")
cls.__params_permuted_in_C = []
cls.__params_permuted_in_K = []
cls.__unpermuted_dims = []
@classmethod
def permute_model(cls, model, dump_fx_graph=False, save_dumped_fx_graph='./model_permutation_graph.json'):
"""Permute a model's weights in order to maintain more magnitude after enforcing the sparsity constraint."""
if cls.__verbosity > 0:
print("\n[permute_model] Permuting the model")
# extract the output_dir, so all the intermediate fx_graph can be saved under that path
extract_output_dir=os.path.split(save_dumped_fx_graph)[0]
cls.__permutation_output_dir = extract_output_dir
fx_graph, success_in_build_fx_graph = cls.build_fx_graph(model, dump_fx_graph=dump_fx_graph, save_dumped_fx_graph=save_dumped_fx_graph)
if success_in_build_fx_graph:
fx_graph_after_init_flags = cls.init_permutation_flags(fx_graph)
fx_graph_after_find_real_parents = cls.find_real_parents(fx_graph_after_init_flags)
fx_graph_after_find_real_children = cls.find_real_children(fx_graph_after_find_real_parents)
fx_graph_after_making_groups = cls.make_sibling_coparent_groups(fx_graph_after_find_real_children)
fx_graph_after_fixup_concats = cls.fixup_concats(fx_graph_after_making_groups)
fx_graph_after_enforce_dimension_agreement = cls.enforce_dimension_agreement(fx_graph_after_fixup_concats)
fx_graph_after_propagate_flags = cls.propagate_permutation_flags(fx_graph_after_enforce_dimension_agreement)
start_time_search_for_good_permutation = time.perf_counter()
fx_graph_after_find_permutations = cls.find_permutations(fx_graph_after_propagate_flags)
if torch.distributed.is_initialized():
if cls.__verbosity > 0:
duration_search_for_good_permutation = time.perf_counter() - start_time_search_for_good_permutation
print(f"[permute_model] Rank {torch.distributed.get_rank()} completed search in {duration_search_for_good_permutation:.2f}s, waiting for others.", force=True)
torch.distributed.barrier()
duration_search_for_good_permutation = time.perf_counter() - start_time_search_for_good_permutation
if cls.__verbosity > 0:
print("\n[permute_model] Take {:.4f} seconds to finish search_for_good_permutation function.".format(duration_search_for_good_permutation))
fx_graph_after_sync_permutations = cls.sync_permutations(fx_graph_after_find_permutations)
fx_graph_after_apply_permutations = cls.apply_permutations(fx_graph_after_sync_permutations)
cls.check_graph_for_unpermuted_nodes(fx_graph_after_apply_permutations)
fx_graph = fx_graph_after_apply_permutations
if cls.__save_permutation_graph:
cls.save_graph_to_json(fx_graph, save_dumped_graph_path_with_name=os.path.join(cls.__permutation_output_dir, './model_graph_permutation_graph.json')) # save the intermediate graph as JSON file for debugging
return success_in_build_fx_graph
@classmethod
def get_permutation_stats(cls):
"""Return statistics for how many permutations were applied in various dimensions, used for testing"""
return cls.__params_permuted_in_C, cls.__params_permuted_in_K, cls.__unpermuted_dims
@classmethod
def apply_permutation_in_C_dim(cls, node_name, permutation_sequence, dryrun):
"""This function is used to permutation for a node in C dim. (Only need to handle the weight of the node) """
if cls.__verbosity > 1 and dryrun:
print("[apply_permutation_in_C_dim] Permutation for node: \'{:}\' in C dim".format(node_name))
if len(permutation_sequence) == 0:
if cls.__verbosity >= 0:
print(f"ERROR: [apply_permutation_in_C_dim] the permutation sequence for node {node_name} is empty, fail to apply permutation in C dim.")
return False
is_node_in_sparse_parameters = False
success_permutation = False
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if node_name_matches(node_name, module_name):
if cls.__verbosity > 2 and dryrun:
print("[apply_permutation_in_C_dim] find the node: \'{:}\' \'{:}\' in cls.__sparse_parameters, succeed to apply permutation in C dim.".format(node_name, p_name))
is_node_in_sparse_parameters = True
permutation_to_apply = permutation_sequence
if p.shape[1] != len(permutation_sequence): # assumed to be grouped convolutions or concatenated weights
if p.shape[1] % len(permutation_sequence) != 0:
return False
permutation_to_apply = replicate_sequence(permutation_sequence, p.shape[1] // len(permutation_sequence))
if not dryrun:
p.data.copy_(p[:, permutation_to_apply, ...])
cls.__params_permuted_in_C.append(node_name + "." + p_name)
success_permutation = True
if not is_node_in_sparse_parameters:
# A special case: if the node itself is not in sparse_module_names but one of its real siblings is, the node does not take part in the permutation search itself, yet it may still need to apply the offline permutation in the C dim using the permutation sequence found for its real siblings in sparse_module_names
try:
for module_name_from_all_parameters, module_from_all_parameters, p_name_from_all_parameters, p_from_all_parameters in cls.__all_parameters:
if node_name_matches(node_name, module_name_from_all_parameters) and p_name_from_all_parameters == "weight":
if cls.__verbosity > 3 and dryrun:
print("[apply_permutation_in_C_dim] cannot find the node: \'{:}\' \'{:}\' in cls.__sparse_parameters, but can find in cls.__all_parameters.".format(node_name, p_name_from_all_parameters))
permutation_to_apply = permutation_sequence
if p_from_all_parameters.shape[1] != len(permutation_sequence): # assumed to be grouped convolutions
if p_from_all_parameters.shape[1] % len(permutation_sequence) != 0:
return False
permutation_to_apply = replicate_sequence(permutation_sequence, p_from_all_parameters.shape[1] // len(permutation_sequence))
if not dryrun:
p_from_all_parameters.data.copy_(p_from_all_parameters[:, permutation_to_apply, ...])
cls.__params_permuted_in_C.append(node_name + "." + p_name_from_all_parameters)
success_permutation = True
if cls.__verbosity > 2 and dryrun:
print("[apply_permutation_in_C_dim] cannot find the node: \'{:}\' in cls.__sparse_parameters, after trying with cls.__all_parameters, succeed to apply permutation in C dim.".format(node_name))
except:
success_permutation = False
if cls.__verbosity >= 0:
print("ERROR: [apply_permutation_in_C_dim] cannot find the node: \'{:}\' in cls.__sparse_parameters, after trying with cls.__all_parameters, still fail to apply permutation in C dim.".format(node_name))
return success_permutation
@classmethod
def permute_attr(cls, node_name, permutation_sequence, fx_graph, dryrun):
""" Permute a node's attributes. Somewhat hacky, assumes that we'll find exactly one dimension with a length matching the permutation's """
assert 'attr' in fx_graph[node_name].keys()
attr = fx_graph[node_name]['attr']
if cls.__verbosity > 1:
print(f"Found attribute {node_name} of shape {attr.shape}")
found_perm = False
for dim in range(len(attr.shape)):
if attr.shape[dim] == len(permutation_sequence):
if found_perm:
if cls.__verbosity > 0:
print(f"\tWARNING: {node_name} has already been permuted, but it's trying to happen again along another dimension {dim}.")
return False
found_perm = True
if cls.__verbosity > 1 and dryrun:
print(f"\tpermuting along dimension {dim}")
if not dryrun:
# permute the dimension of interest to the front, permute within that dimension, then reset it
order = [c for c in range(len(attr.shape))]
order[0] = dim
order[dim] = 0
prmt = tuple(order)
temp_weight = torch.clone(attr)
temp_weight = torch.permute(temp_weight, prmt)
temp_weight.copy_(temp_weight[permutation_sequence, ...])
temp_weight = torch.permute(temp_weight, prmt)
attr.data.copy_(temp_weight)
cls.__params_permuted_in_K.append(node_name + "_" + str(dim))
return found_perm
@classmethod
def apply_permutation_in_K_dim(cls, node_name, permutation_sequence, fx_graph, dryrun):
"""This function is used to permutation for a node in K dim. (Need to handle the weight/bias/running_mean/running_var of the node)"""
if cls.__verbosity > 1:
print("[apply_permutation_in_K_dim] Permutation for node: \'{:}\' in K dim".format(node_name))
if len(permutation_sequence) == 0:
if cls.__verbosity >= 0:
print("ERROR: [apply_permutation_in_K_dim] the permutation sequence is empty, fail to apply permutation in K dim.")
return False
# permute attribute nodes
if 'attr' in fx_graph[node_name].keys():
return cls.permute_attr(node_name, permutation_sequence, fx_graph, dryrun)
# if we didn't store the attribute already, look in the modules' parameters
is_node_in_all_parameters = False
success_permutation = False
for module_name, module, p_name, p in cls.__all_parameters:
if node_name_matches(node_name, module_name):
if cls.__verbosity > 1 and dryrun:
print("[apply_permutation_in_K_dim] find the node: \'{:}\' with \'{:}\' in cls.__all_parameters, may succeed to apply permutation in K dim.".format(node_name, p_name))
is_node_in_all_parameters = True
permutation_to_apply = permutation_sequence
if p.shape[0] != len(permutation_sequence): # assumed to be grouped convolutions
if cls.__verbosity > 2 and dryrun:
print(f"Mismatch in K dimension between found module {module_name} {p_name} for node {node_name}: permutation length {len(permutation_sequence)} but parameter shape in K {p.shape[0]}")
if p.shape[0] % len(permutation_sequence) != 0:
return False
permutation_to_apply = replicate_sequence(permutation_sequence, p.shape[0] // len(permutation_sequence))
if cls.__verbosity > 1 and dryrun:
print("[apply_permutation_in_K_dim] the node: \'{:}\' with shape: \'{:}\' required replicating the permutation sequence with len \'{:}\' {:} times to succeed in applying the permutation in the K dimension.".format(node_name, p.shape, len(permutation_sequence), p.shape[0] // len(permutation_sequence)))
else:
if cls.__verbosity > 1 and dryrun:
print("[apply_permutation_in_K_dim] the node: \'{:}\' with shape: \'{:}\', can match the size of permutation sequence with len: \'{:}\', succeed to apply permutation in K dim.".format(node_name, p.shape, len(permutation_sequence)))
if not dryrun:
p.data.copy_(p[permutation_to_apply, ...])
cls.__params_permuted_in_K.append(node_name + "." + p_name)
success_permutation = True
if not is_node_in_all_parameters:
if cls.__verbosity >= 0:
print("ERROR: [apply_permutation_in _K_dim] cannot find the node: \'{:}\' in cls.__all_parameters, fail to apply permutation in K dim.".format(node_name))
success_permutation = False
return success_permutation
@classmethod
def check_graph_for_unpermuted_nodes(cls, fx_graph):
"""Make sure that all permutable nodes/parameters were actually permuted and all GPUs agree"""
for node_name in fx_graph.keys():
node = fx_graph[node_name]
if 'C_permutable' in node.keys() and node['C_permutable'] and not node['C_permuted']:
sibling_group_id = node['sibling_group_id']
if node['is_real'] and cls.__group_data['skipped_sibling_groups'][sibling_group_id] is None:
if cls.__verbosity >= 0:
print(f"{node_name} was C_permutable in a not skipped sibling group but was not permuted along C! {node}")
cls.__unpermuted_dims.append(node_name + "_C")
if 'K_permutable' in node.keys() and node['K_permutable'] and not node['K_permuted']:
coparent_group_id = node['coparent_group_id']
if node['is_real'] and cls.__group_data['skipped_coparent_groups'][coparent_group_id] is None:
if cls.__verbosity >= 0:
print(f"{node_name} was K_permutable in a not skipped coparent group but was not permuted along K! {node}")
cls.__unpermuted_dims.append(node_name + "_K")
if cls.__verbosity > 0:
print(f"[check_graph_for_unpermuted_nodes] found nodes that missed permutations along {len(cls.__unpermuted_dims)} dimensions.")
# make sure all GPUs agree
if torch.distributed.is_initialized():
cls.__unpermuted_dims = sorted(cls.__unpermuted_dims)
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
dist_store = torch.distributed.TCPStore("127.0.0.1", cls.__tcpstore_port, world_size, rank==0)
torch.distributed.barrier()
dist_store.set(str(rank), ','.join(cls.__unpermuted_dims))
torch.distributed.barrier()
if rank == 0:
my_list = dist_store.get('0').decode()
for peer in range(1, world_size):
peer_list = dist_store.get(str(peer)).decode()
assert my_list == peer_list, f"peer {peer} disagreed with rank 0's list of unpermuted nodes: \n{my_list}\n{peer_list}"
@classmethod
def find_sparse_parameters_for_node(cls, node_name):
"""If the node has parameters that are in the trackd sparse parameter list, find them and reshape to a 2D tensor with channels last"""
node_weight = None
# check the sparse parameters
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if node_name_matches(node_name, module_name):
node_weight = torch.zeros_like(p)
node_weight.copy_(p)
# if we found something, reshape to concatenate along the same dimension
if node_weight is not None:
# Need to handle the concat for layers with different R & S
shape = node_weight.shape
# 1d-tensor
if len(shape) == 1:
node_weight = node_weight.view(1, shape[0])
# 2d-tensor (K, C)
elif len(shape) == 2:
node_weight = node_weight.view(shape[0], shape[1])
# 3d-tensor (K, C, R)
elif len(shape) == 3:
node_weight = node_weight.permute(0,2,1).contiguous().view(shape[0]*shape[2], shape[1])
# 4d-tensor (K, C, R, S)
elif len(shape) == 4:
# convs
node_weight = node_weight.permute(2,3,0,1).contiguous().view(shape[2]*shape[3]*shape[0], shape[1])
return node_weight
@classmethod
def find_permutation_for_matrix_group(cls, matrix_group):
"""Find a good permutation for some matrix (which may be concatenated matrices that require the same permutation)"""
if cls.__verbosity > 1:
print(f"Searching for a good permutation for this sibling group of shape {matrix_group.shape}")
permutation_found = False
num_channels = matrix_group.shape[1]
group_permutation = [c for c in range(num_channels)]
# automatic check for skipping the permutation search process
original_magnitude = (torch.abs(matrix_group)).sum(dtype=torch.float64)
pruned_magnitude = sum_after_2_to_4(matrix_group.cpu().detach().numpy())
diff_ratio = abs(original_magnitude - pruned_magnitude)/original_magnitude
epsilon = 1e-3
if cls.__verbosity > 1:
print("\n[search_for_good_permutation] Original element abs sum: {:}, Pruned element abs sum: {:}, Diff ratio: {:}".format(original_magnitude, pruned_magnitude, diff_ratio))
start_time_accelerated_search_for_good_permutation = time.perf_counter()
if diff_ratio < epsilon:
if cls.__verbosity > 2:
print("[search_for_good_permutation] Original element abs sum is almost same as the pruned element abs sum, further permutation search will not help, skipping!")
else:
if cls.__verbosity > 2:
print("[search_for_good_permutation] Original element abs sum is different from the pruned element abs sum, further permutation search will help, continue with the permutation search!")
# call the permutation search CUDA kernels as ASP extension.
# users can provide prefer search strategy by providing a valid 'search_options' as a dictionary,
# or users can implement their customized 'accelerated_search_for_good_permutation' function.
search_options = {}
# No.1 Strategy: Exhaustive Search
search_options['strategy'] = 'exhaustive'
search_options['stripe_group_size'] = 8
search_options['escape_attempts'] = 100
# No.2 Strategy: Progressive Channel Swap Search
# search_options['strategy'] = 'progressive channel swap'
# search_options['progressive_search_time_limit'] = 10
# search_options['improvement_threshold'] = 1e-9
# permutation search time is too long for matrix_group with large channel num
# change from Exhaustive Search to Progressive Channel Swap Search based on input matrix_group size
if num_channels > 2048:
search_options = {}
search_options['strategy'] = 'progressive channel swap'
search_options['progressive_search_time_limit'] = 120
search_options['improvement_threshold'] = 1e-9
if cls.__verbosity > 1:
print(f"[search_for_good_permutation] search options: {search_options}")
group_permutation = accelerated_search_for_good_permutation(matrix_group, options=search_options, verbosity=cls.__verbosity)
permutation_found = True
if cls.__verbosity > 1:
duration_accelerated_search_for_good_permutation = time.perf_counter() - start_time_accelerated_search_for_good_permutation
permuted_magnitude = sum_after_2_to_4(matrix_group.cpu().detach().numpy()[:,group_permutation])
print("[search_for_good_permutation] Take {:.4f} seconds to finish accelerated_search_for_good_permutation function and with final magnitude {:}.".format(duration_accelerated_search_for_good_permutation, permuted_magnitude))
return group_permutation, permutation_found
@classmethod
def skip_sibling_group(cls, fx_graph, sibling_group_id, reason):
"""Keep track of sibling groups that do not have permutations applied"""
# grab a parent to get the coparent group id
sibling_group = cls.__group_data['sibling_groups'][sibling_group_id]
a_sibling = list(sibling_group)[0]
a_parent = fx_graph[a_sibling]['real_parents'][0]
coparent_group_id = fx_graph[a_parent]['coparent_group_id']
if cls.__verbosity > 1:
print(f"Skipping permutations for Sibling Group {sibling_group_id} and Coparent Group {coparent_group_id}: {reason}")
cls.__group_data['skipped_sibling_groups'][sibling_group_id] = reason
cls.__group_data['skipped_coparent_groups'][coparent_group_id] = reason
@classmethod
def collect_sparse_weights(cls, fx_graph, sibling_group, sibling_group_C_param):
"""Gather all sparse weights for a sibling group (to serve as input to the permutation search)"""
matrix_group = None
for sibling in sibling_group:
node_weight = cls.find_sparse_parameters_for_node(sibling)
if node_weight is not None:
# reshape due to siblings with grouped convolutions of different sizes
assert node_weight.shape[1] % sibling_group_C_param == 0, f"sibling {sibling}'s weights' C={node_weight.shape[1]} must be even multiple of the sibling group's C parameter {sibling_group_C_param}"
node_weight = torch.reshape(node_weight, (-1, sibling_group_C_param))
if matrix_group is None:
matrix_group = node_weight
else:
try:
matrix_group = torch.cat((matrix_group, node_weight), dim = 0) # concat the weights in the K dimension, keep the same C dimension
except:
if cls.__verbosity >= 0:
print("ERROR: [search_for_good_permutation][warning] cannot merge the weight for node: \'{:}\', with its weight shape: \'{:}\', the matrix_group shape: \'{:}\'.".format(sibling, node_weight.size(), matrix_group.size()))
continue
if cls.__verbosity > 2:
print("[search_for_good_permutation] have merged the weight for node: \'{:}\', with its weight shape: \'{:}\', the matrix_group shape: \'{:}\'.".format(sibling, node_weight.size(), matrix_group.size()))
else:
if cls.__verbosity > 2:
print(f"[search_for_good_permutation] not adding dense weights for node {sibling} to the group")
return matrix_group
@classmethod
def find_sibling_group_permutation(cls, fx_graph, sibling_group_id):
""""Find a good permutation for some sibling group"""
if cls.__verbosity > 1:
print(f"Finding permutation for sibling group {sibling_group_id}")
cls.reset_seed()
sibling_group = cls.__group_data['sibling_groups'][sibling_group_id]
sibling_group_C_param = int(cls.__group_data['sibling_group_C_params'][sibling_group_id])
if sibling_group_C_param % 4 != 0 or sibling_group_C_param < 8:
cls.skip_sibling_group(fx_graph, sibling_group_id, f"Useless C: {sibling_group_C_param}")
return
# collect *sparse* weights from all siblings, get the coparent group
matrix_group = cls.collect_sparse_weights(fx_graph, sibling_group, sibling_group_C_param)
# early-out if no siblings are sparse
if matrix_group is None:
cls.skip_sibling_group(fx_graph, sibling_group_id, 'Dense')
return
# find a good permutation
group_permutation, found = cls.find_permutation_for_matrix_group(matrix_group)
# if no permutation was found, we didn't need it (input already sparse)
if not found:
cls.skip_sibling_group(fx_graph, sibling_group_id, 'Not needed')
return
if cls.__verbosity > 2:
print(f"Permutation for sibling group {sibling_group_id}: {group_permutation}")
cls.__group_data['sibling_group_permutations'][sibling_group_id] = group_permutation
@classmethod
def permute_sibling_group(cls, fx_graph, sibling_group_id, group_permutation):
"""Apply a permutation to some sibling group"""
if cls.__verbosity > 1:
print(f"Attempting to permute sibling group {sibling_group_id}")
sibling_group = cls.__group_data['sibling_groups'][sibling_group_id]
# apply the permutation in two steps: first, a dry run to find any issues.
# if there were no issues, actually apply the permutation in the second step.
success = True
coparent_group_id = None
for dryrun in [True, False]:
# apply that permutation to the siblings' C dimension
for sibling in sibling_group:
assert fx_graph[sibling]['C_permutable'] and not fx_graph[sibling]['C_permuted']
sibling_permuted = cls.apply_permutation_in_C_dim(sibling, group_permutation, dryrun)
if dryrun:
success = success and sibling_permuted
else:
assert sibling_permuted, "shouldn't fail permuting siblings after the dry run"
fx_graph[sibling]['C_permuted'] = sibling_permuted
a_parent = fx_graph[sibling]['real_parents'][0]
if coparent_group_id is None:
coparent_group_id = fx_graph[a_parent]['coparent_group_id']
else:
assert coparent_group_id == fx_graph[a_parent]['coparent_group_id'], f"parent {a_parent} must belong to the same coparent group {coparent_group_id}, not {fx_graph[a_parent]['coparent_group_id']}"
# grab the parents (and co-parents) and apply to their K dimension
coparents = cls.__group_data['coparent_groups'][coparent_group_id]
for coparent in coparents:
assert fx_graph[coparent]['K_permutable'] and not fx_graph[coparent]['K_permuted']
coparent_permuted = cls.apply_permutation_in_K_dim(coparent, group_permutation, fx_graph, dryrun)
if dryrun:
success = success and coparent_permuted
else:
assert coparent_permuted, "shouldn't fail permuting coparents after the dry run"
fx_graph[coparent]['K_permuted'] = coparent_permuted
children_permuted = cls.apply_permutation_in_K_dim_to_children(fx_graph, coparent, group_permutation, dryrun)
if dryrun:
success = success and children_permuted
else:
assert children_permuted, "shouldn't fail permuting coparents' children after the dry run"
if not success:
cls.skip_sibling_group(fx_graph, sibling_group_id, "dryrun_failure")
if cls.__verbosity > 0:
print(f"There was an issue permuting sibling group {sibling_group_id}, skipping it to preserve network quality.")
break
@classmethod
def apply_permutation_in_K_dim_to_children(cls, fx_graph, node_name, permutation, dryrun):
"""Apply a permutation along K to the children of some node"""
success = True
children = fx_graph[node_name]['children']
if cls.__verbosity > 2 and dryrun:
print(f"Applying a permutation in K to children of {node_name} : {children}")
# apply the permutation along K to children as necessary
for child in children:
if 'is_real' in fx_graph[child].keys() and fx_graph[child]['is_real']:
if cls.__verbosity > 3 and dryrun:
print(f"\tFound a real child {child}, not permuting it or its children along K")
else:
if 'module_type' not in fx_graph[child].keys() or fx_graph[child]['module_type'] == 'None':
if cls.__verbosity > 3 and dryrun:
print(f"\tPermuting children of non-module {child} along K")
success = success and cls.apply_permutation_in_K_dim_to_children(fx_graph, child, permutation, dryrun)
elif not fx_graph[child]['C_permutable']:
if fx_graph[child]['K_permutable'] and not fx_graph[child]['K_permuted']:
if cls.__verbosity > 2 and dryrun:
print(f"\tPermuting {child} along K")
child_permuted = cls.apply_permutation_in_K_dim(child, permutation, fx_graph, dryrun)
success = success and child_permuted
if not dryrun:
fx_graph[child]['K_permuted'] = child_permuted
assert fx_graph[child]['K_passthru']
if fx_graph[child]['K_passthru']:
success = success and cls.apply_permutation_in_K_dim_to_children(fx_graph, child, permutation, dryrun)
else:
if cls.__verbosity >= 0:
print(f"\t!! ERROR {child} was a not real module that was not K_passthru")
return success
@classmethod
def defer_prints(cls):
"""Collect prints from this rank in distributed mode to avoid interleaved output"""
if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1:
cls.__new_stdout = io.StringIO(str(torch.distributed.get_rank()))
cls.__builtin_print = __builtin__.print
def deferred_print(*args, **kwargs):
try: # see if torchvision examples has suppressed other ranks with the force argument
cls.__builtin_print(*args, file=cls.__new_stdout, force=True, **kwargs)
except:
cls.__builtin_print(*args, file=cls.__new_stdout, **kwargs)
__builtin__.print = deferred_print
@classmethod
def resume_prints(cls):
"""Emit the collected outputs from this rank, resume immediate printing"""
if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1:
output = cls.__new_stdout.getvalue()
__builtin__.print = cls.__builtin_print
try:
print(output, force=True)
except:
print(output)
@classmethod
def find_permutations(cls, fx_graph):
"""Search for permutations for all sibling groups"""
for sibling_group_id in cls.__group_data['sibling_groups'].keys():
search_this_group = True
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
if sibling_group_id % world_size != rank:
search_this_group = False
cls.__group_data['sibling_group_permutations'][sibling_group_id] = None
if search_this_group:
cls.defer_prints()
sibling_group = cls.__group_data['sibling_groups'][sibling_group_id]
test_node_name = list(sibling_group)[0]
if not fx_graph[test_node_name]['C_permutable']:
if cls.__verbosity > 1:
print(f"Skipping permutation for sibling group {sibling_group_id} since it does not allow permutations along C")
else:
if cls.__verbosity > 1:
print(f"Sibling group {sibling_group_id} can permute along C, permuting it")
cls.find_sibling_group_permutation(fx_graph, sibling_group_id)
cls.resume_prints()
return fx_graph
@classmethod
def sync_permutations(cls, fx_graph):
"""If multiple GPUs were involved in finding permutations, make sure everyone's in sync"""
if not torch.distributed.is_initialized():
return fx_graph
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
dist_store = torch.distributed.TCPStore("127.0.0.1", cls.__tcpstore_port, world_size, rank==0)
if cls.__verbosity > 0:
print(f"Syncing permutations found among world size {world_size}")
torch.distributed.barrier()
for sibling_group_id in sorted(cls.__group_data['sibling_groups'].keys()):
src_rank = sibling_group_id % world_size
if src_rank == rank:
to_send = cls.__group_data['sibling_group_permutations'].get(sibling_group_id, None)
skip_reason = None
if to_send is None:
skip_reason = cls.__group_data['skipped_sibling_groups'].get(sibling_group_id, None)
if skip_reason is None:
to_send = ''
else:
to_send = 'skip'
else:
to_send = ','.join(str(c) for c in to_send)
dist_store.set(str(sibling_group_id), to_send)
if skip_reason is not None:
dist_store.set(f"skip {sibling_group_id}", skip_reason)
if cls.__verbosity > 1:
print(f"{rank}: stored permutation for sibling group {sibling_group_id}", force=True)
torch.distributed.barrier()
for sibling_group_id in sorted(cls.__group_data['sibling_groups'].keys()):
permutation = dist_store.get(str(sibling_group_id)).decode()
if permutation == 'skip':
permutation = None
skip_reason = dist_store.get(f"skip {sibling_group_id}").decode()
cls.skip_sibling_group(fx_graph, sibling_group_id, skip_reason)
else:
if len(permutation) == 0:
permutation = None
else:
permutation = [int(c) for c in permutation.split(',')]
cls.__group_data['sibling_group_permutations'][sibling_group_id] = permutation
if cls.__verbosity > 1:
print(f"Got permutation for sibling group {sibling_group_id}")
torch.distributed.barrier()
return fx_graph
@classmethod
def apply_permutations(cls, fx_graph):
"""Apply all the permutations that were found to the network appropriately"""
for sibling_group_id in cls.__group_data['sibling_group_permutations'].keys():
permutation = cls.__group_data['sibling_group_permutations'][sibling_group_id]
if permutation is not None:
cls.permute_sibling_group(fx_graph, sibling_group_id, permutation)
return fx_graph
@staticmethod
def insert_MHA_out_proj(fx_graph, MHA_node, verbosity):
"""MHA nodes have a hidden out_proj node, so insert it and fix up neighboring nodes"""
if verbosity > 1:
print(f"Inserting MHA out_proj for node {MHA_node}")
out_proj_node_name = MHA_node + ".out_proj"
# insert the new node
fx_graph[out_proj_node_name] = {}
fx_graph[out_proj_node_name]['parents'] = [MHA_node]
fx_graph[out_proj_node_name]['children'] = fx_graph[MHA_node]['children']
fx_graph[MHA_node]['children'] = [out_proj_node_name]
# set the new node's properties
fx_graph[out_proj_node_name]['fx_op'] = 'call_module'
fx_graph[out_proj_node_name]['module_type'] = 'torch.nn.modules.linear.Linear'
fx_graph[out_proj_node_name]['groups_param'] = 'None'
fx_graph[out_proj_node_name]['C_param'] = fx_graph[MHA_node]['C_param']
fx_graph[out_proj_node_name]['K_param'] = fx_graph[MHA_node]['K_param']
fx_graph[out_proj_node_name]['sibling_group_id'] = None
fx_graph[out_proj_node_name]['coparent_group_id'] = None
# set permutation flags
fx_graph[out_proj_node_name]['C_permutable'] = False
fx_graph[MHA_node]['K_permutable'] = False
fx_graph[MHA_node]['C_permutable'] = True
fx_graph[out_proj_node_name]['K_permutable'] = True
fx_graph[out_proj_node_name]['K_passthru'] = False
fx_graph[out_proj_node_name]['C_permuted'] = False
fx_graph[out_proj_node_name]['K_permuted'] = False
fx_graph[out_proj_node_name]['is_real'] = True
if verbosity > 2:
print(f"\tUpdated: {MHA_node}: {fx_graph[MHA_node]}")
print(f"\tAdded: {out_proj_node_name}: {fx_graph[out_proj_node_name]}")
# update any nodes that thought their parent was the MHA node
for node in fx_graph.keys():
parents = fx_graph[node]['parents']
if node != out_proj_node_name and MHA_node in parents:
parents.remove(MHA_node)
parents.append(out_proj_node_name)
fx_graph[node]['parents'] = parents
if verbosity > 2:
print(f"\tUpdated parents of {node}: {fx_graph[node]}")
return fx_graph
@staticmethod
def init_grouped_conv_permutation_flags(fx_graph, node_name, node_groups, verbosity):
"""Handle grouped convolutions to make dimensions match"""
node_C = int(fx_graph.get(node_name).get('C_param'))
node_K = int(fx_graph.get(node_name).get('K_param'))
node_groups = int(node_groups)
if verbosity > 2:
print(f"\t{node_name} pre-divide C: {node_C}, K: {node_K}, G: {node_groups}")
assert node_C % node_groups == 0
node_C = int(node_C / node_groups)
fx_graph[node_name]['C_param'] = str(node_C)
if verbosity > 2:
print(f"\t{node_name} post-divide C: {node_C}, K: {node_K}, G: {node_groups}")
if node_C == 1: # G == C (C is pre-divided by G)
if node_groups == node_K: # true depthwise, G == C == K (C will be pre-divided by G)
fx_graph[node_name]['K_permutable'] = True
fx_graph[node_name]['K_permuted'] = False
fx_graph[node_name]['K_passthru'] = True
fx_graph[node_name]['is_real'] = False
#else: # G != K, handling a permutation along K would be very tricky and not likely useful
else: # G != C
if node_C > 4 and node_C % 4 == 0: # permutations only help if there's more than one 2:4 pruning group
fx_graph[node_name]['C_permutable'] = True
fx_graph[node_name]['C_permuted'] = False
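    # Illustrative examples (not part of the original source):
    #   - a true depthwise conv (e.g. in=64, out=64, groups=64) has per-group C == 1 and G == K, so it
    #     is marked K_permutable/K_passthru and is not treated as a "real" node
    #   - a grouped conv with in=128, out=256, groups=2 has per-group C == 64, which is > 4 and a
    #     multiple of 4, so its input channels remain permutable within each group (C_permutable)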
@classmethod
def init_permutation_flags(cls, fx_graph):
"""Set the permutation flags for each node based only on that node's module type and parameters"""
if cls.__verbosity > 0:
print("\n[init_permutation_flags] Initialize the permutation flags for each node according to module type and parameters")
# initialize some graph-wide trackers
cls.__group_data = {}
cls.__group_data['next_sibling_group_id'] = 0
cls.__group_data['next_coparent_group_id'] = 0
cls.__group_data['sibling_groups'] = {}
cls.__group_data['sibling_group_permutations'] = {}
cls.__group_data['sibling_group_C_params'] = {}
cls.__group_data['skipped_sibling_groups'] = {}
cls.__group_data['coparent_groups'] = {}
cls.__group_data['skipped_coparent_groups'] = {}
# track MHA nodes
MHA_nodes = []
# initialize each node's details
for node_name in fx_graph.keys():
fx_node = fx_graph.get(node_name)
node_module_type = fx_node.get('module_type')
if cls.__verbosity > 1:
if node_module_type == 'get_attr':
print(f"Initializing node {node_name} of type {node_module_type}")
else:
print(f"Initializing node {node_name} of type {node_module_type}: {fx_node}")
# default for all nodes: don't allow anything
if node_module_type is not None:
fx_graph[node_name]['C_permutable'] = False # does this node have parameters that can be permuted in C
fx_graph[node_name]['K_permutable'] = False # does this node have parameters that can be permuted in K
fx_graph[node_name]['K_passthru'] = False # does this node need to pass a K permutation to its parents
fx_graph[node_name]['is_real'] = False
fx_graph[node_name]['C_permuted'] = False
fx_graph[node_name]['K_permuted'] = False
# initialize sibling and coparent groups
fx_graph[node_name]['sibling_group_id'] = None
fx_graph[node_name]['coparent_group_id'] = None
# update each node to be more permissive if supported
if node_module_type in cls.__permutation_target_module_types:
fx_graph[node_name]['is_real'] = True
node_groups = fx_graph.get(node_name).get('groups_param')
if (node_groups in ['None', '1']): # no groups, no constraints
fx_graph[node_name]['C_permutable'] = True
fx_graph[node_name]['K_permutable'] = True
else: # handle groups
Permutation.init_grouped_conv_permutation_flags(fx_graph, node_name, node_groups, cls.__verbosity)
elif node_module_type in cls.__permute_K_and_passthru_module_types:
fx_graph[node_name]['K_permutable'] = True
fx_graph[node_name]['K_passthru'] = True
fx_graph[node_name]['is_real'] = False
elif node_module_type in cls.__simple_passthru_module_types:
fx_graph[node_name]['K_passthru'] = True
fx_graph[node_name]['is_real'] = False
elif node_module_type in cls.__disallow_permutations_module_types:
fx_graph[node_name]['is_real'] = True
fx_graph[node_name]['C_param'] = 1
fx_graph[node_name]['K_param'] = 1
fx_graph[node_name]['groups_param'] = 1
elif 'activation' in node_module_type:
if cls.__verbosity > 0:
print(f"WARNING: how should permutation flags be initialized for node {node_name} of module type {node_module_type}? Found 'activation', assuming simple passthru behavior.")
fx_graph[node_name]['K_passthru'] = True
fx_graph[node_name]['is_real'] = False
else:
if cls.__verbosity > 0:
print(f"WARNING: how should permutation flags be initialized for node {node_name} of module type {node_module_type}? Defaulting to strict, disallowing permutations around it.")
# is_real coupled with disallowed C and K permutations will poison real parents and real children
fx_graph[node_name]['is_real'] = True
# dummy entries:
fx_graph[node_name]['C_param'] = 1
fx_graph[node_name]['K_param'] = 1
fx_graph[node_name]['groups_param'] = 1
# MHA nodes only handle the in_proj, need to add out_proj nodes explicitly
# collect them here so new fx_graph keys can be added after this loop (the dict cannot change size while being iterated)
if node_module_type == 'torch.nn.modules.activation.MultiheadAttention':
MHA_nodes.append(node_name)
if cls.__verbosity > 1:
if node_module_type == 'get_attr':
print(f"\tInitialized node {node_name} of type {node_module_type}")
else:
print(f"\tInitialized node {node_name} of type {node_module_type}: {fx_graph[node_name]}")
for MHA_node in MHA_nodes:
fx_graph = Permutation.insert_MHA_out_proj(fx_graph, MHA_node, cls.__verbosity)
return fx_graph
@staticmethod
def collect_siblings(fx_graph, node_name, all_siblings):
"""Recursively build a set of some node's siblings in the graph"""
# find all siblings of the requested node
siblings = set()
parents = fx_graph.get(node_name).get('real_parents')
for parent in parents:
children = fx_graph.get(parent).get('real_children')
for child in children:
siblings.add(child)
# separate the new siblings, since we'll need to process them recursively
new_siblings = siblings.difference(all_siblings)
# update the final list with just the new elements
all_siblings.update(new_siblings)
for new_sibling in new_siblings:
all_siblings = Permutation.collect_siblings(fx_graph, new_sibling, all_siblings)
return all_siblings
@staticmethod
def propagate_sibling_group(fx_graph, all_siblings, verbosity):
"""Check a sibling group for ability to be permuted, disallow all siblings and coparents if there's an issue"""
made_change = False
allow_C = True
for sibling in all_siblings:
pre_check = allow_C
allow_C = allow_C and fx_graph[sibling]['C_permutable']
if allow_C != pre_check:
if verbosity > 2:
if fx_graph[sibling]['module_type'] == 'get_attr':
print(f"\tnode {sibling} has poisoned the sibling group of {all_siblings}")
else:
print(f"\tnode {sibling} has poisoned the sibling group of {all_siblings}: {fx_graph[sibling]}")
break
if not allow_C:
for sibling in all_siblings:
made_change = made_change or fx_graph[sibling]['C_permutable']
fx_graph[sibling]['C_permutable'] = False
# only disable permutation along K for parents if this node cannot passthru, either
if not fx_graph[sibling]['K_passthru']:
sibling_parents = fx_graph[sibling]['real_parents']
for sibling_parent in sibling_parents:
made_change = made_change or fx_graph[sibling_parent]['K_permutable'] or fx_graph[sibling_parent]['K_passthru']
fx_graph[sibling_parent]['K_permutable'] = False
fx_graph[sibling_parent]['K_passthru'] = False
return made_change
@staticmethod
def collect_coparents(fx_graph, node_name, all_coparents):
"""Recursively build a set of all coparents of a particular node in the graph"""
# find all coparents of the requested node
coparents = set()
children = fx_graph.get(node_name).get('real_children')
for child in children:
parents = fx_graph.get(child).get('real_parents')
for parent in parents:
coparents.add(parent)
# coparents are used to restrict what nodes can be permuted along C, so we need to track if the current parents also pass their K permutations up
if fx_graph[parent]['K_passthru']:
grandparents = fx_graph[parent]['real_parents']
for grandparent in grandparents:
coparents = coparents.union(Permutation.collect_coparents(fx_graph, grandparent, coparents))
# separate the new coparents, since we'll need to process them recursively
new_coparents = coparents.difference(all_coparents)
# update the final list with just the new elements
all_coparents.update(new_coparents)
for new_coparent in new_coparents:
all_coparents = Permutation.collect_coparents(fx_graph, new_coparent, all_coparents)
return all_coparents
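    # Illustrative note (not part of the original source): "siblings" are real nodes that consume the
    # output of a common real parent (they must agree on a single permutation along C), while
    # "coparents" are real nodes that feed a common real child (they must agree on a single
    # permutation along K). For conv1 -> {conv2, conv3} -> add -> conv4, conv2 and conv3 are siblings
    # of each other and also coparents with respect to conv4's input.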
@staticmethod
def propagate_coparent_group(fx_graph, all_coparents, verbosity):
"""Check a coparent group for ability to be permuted, disallow all fellow coparents and children if there's an issue"""
# see if all coparents agree that K can be permuted
allow_K = True
made_change = False
for coparent in all_coparents:
pre_check = allow_K
allow_K = allow_K and (fx_graph[coparent]['K_permutable'] or fx_graph[coparent]['K_passthru'])
if allow_K != pre_check:
if verbosity > 2:
if fx_graph[coparent]['module_type'] == 'get_attr':
print(f"\tnode {coparent} has poisoned the coparent group of {all_coparents}")
else:
print(f"\tnode {coparent} has poisoned the coparent group of {all_coparents}: {fx_graph[coparent]}")
break
# if anyone says no, force everyone to 'no', keep track of updated state
if not allow_K:
for coparent in all_coparents:
# all coparents can no longer be permuted along K
if fx_graph[coparent]['K_permutable'] or fx_graph[coparent]['K_passthru']:
made_change = True
fx_graph[coparent]['K_permutable'] = False
fx_graph[coparent]['K_passthru'] = False
# children of coparents can't be permuted along C
coparent_children = fx_graph[coparent]['real_children']
for coparent_child in coparent_children:
if fx_graph[coparent_child]['C_permutable']:
fx_graph[coparent_child]['C_permutable'] = False
made_change = True
return made_change
@classmethod
def fixup_concats(cls, fx_graph):
"""concat operations/modules may concatenate along the channel dimension, which requires special handling (like grouped convs)"""
if cls.__verbosity > 0:
print("[fixup_concats]")
for node_name in fx_graph.keys():
fx_node = fx_graph[node_name]
if fx_node.get('module_type') == 'concat':
# get real parents, find GCD of their Ks
node_real_parents = fx_node['real_parents']
# some concats are at the front of networks (googlenet)
if len(node_real_parents) == 0:
continue
parents_K_params = []
for parent in node_real_parents:
parent_K_param = int(fx_graph[parent]['K_param'])
parents_K_params.append(parent_K_param)
fx_graph[parent]['allow_K_mismatch'] = 'concat op'
# if grouped convolutions give siblings differently sized input channel counts,
# restrict the permutation atom to the greatest common divisor so it can be tiled as needed for each sibling (and parent)
if cls.__verbosity > 2:
print(f"\tfixing up concat node {node_name}, found parents' {node_real_parents} Ks: {parents_K_params}")
children_GCD_param = str(np.gcd.reduce(parents_K_params))
# set this to GCD of children's sibling group
sibling_group_id = -1
node_real_children = fx_node['real_children']
for child in node_real_children:
sibling_group_id = fx_graph[child]['sibling_group_id']
fx_graph[child]['C_param'] = children_GCD_param
old_children_GCD = cls.__group_data['sibling_group_C_params'][sibling_group_id]
cls.__group_data['sibling_group_C_params'][sibling_group_id] = children_GCD_param
# fixup this node's dimensions
# use the functionality of grouped convolutions
fx_node['C_param'] = children_GCD_param
fx_node['K_param'] = old_children_GCD
fx_node['groups_param'] = str(int(old_children_GCD) // int(children_GCD_param))
if cls.__verbosity > 2:
print(f"\tfixed up concat node {node_name}, found GCD of parents' {node_real_parents} K to be {children_GCD_param}, updated children's {node_real_children} C_params and sibling group {sibling_group_id} GCD")
print(f"\tthis node now: {fx_node}")
return fx_graph
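    # Worked example (illustrative, with assumed shapes): a channel-dim concat whose real parents have
    # K = 96 and K = 64 gets children_GCD_param = gcd(96, 64) = 32; the children's C_param and their
    # sibling group GCD are reset to 32, and (assuming the children's previous group GCD was 160, the
    # concatenated width) the concat node itself is rewritten as a grouped-op-like entry with
    # C_param = 32, K_param = 160 and groups_param = 160 // 32 = 5.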
@classmethod
def enforce_dimension_agreement(cls, fx_graph):
"""Check all nodes' channel dimensions against parents and children to make sure they agree; e.g. flatten ops may change these dimensions"""
if cls.__verbosity > 0:
print("[enforce_dimension_agreement]")
for node_name in fx_graph.keys():
fx_node = fx_graph[node_name]
if 'is_real' in fx_node.keys() and fx_node['is_real']:
# enforce this node's input dimension matches its parents' output dimensions
node_C = int(fx_node['C_param'])
node_K = int(fx_node['K_param'])
if fx_graph[node_name]['groups_param'] not in ['1', 'None']:
node_C = node_C * int(fx_node['groups_param'])
node_real_parents = fx_node['real_parents']
if len(node_real_parents) == 0:
if cls.__verbosity > 1:
print(f"\t{node_name} has no real parents, disabling permutations along C")
fx_graph[node_name]['C_permutable'] = False
else:
for real_parent in node_real_parents:
parent_K = int(fx_graph[real_parent]['K_param'])
ignore_mismatch = fx_graph[real_parent].get('allow_K_mismatch')
if ignore_mismatch is not None:
if cls.__verbosity > 1:
print(f"\tIgnoring dimension mismatch between {node_name} (C={node_C}) and its parent {real_parent} (K={parent_K}) as requested: {ignore_mismatch}")
elif parent_K >= 0 and node_C != parent_K:
if cls.__verbosity > 1:
print(f"\tDimensions mismatch between {node_name} (C={node_C}) and its parent {real_parent} (K={parent_K}), disallowing the relevant permutations")
fx_graph[node_name]['C_permutable'] = False
fx_graph[real_parent]['K_permutable'] = False
if cls.__verbosity > 2:
print(f"\t{fx_graph[node_name]}\n\t{fx_graph[real_parent]}")
if len(fx_graph[node_name]['real_children']) == 0:
if cls.__verbosity > 1:
print(f"\t{node_name} has no real children, disabling permutations along K")
fx_graph[node_name]['K_permutable'] = False
return fx_graph
@classmethod
def make_sibling_coparent_groups(cls, fx_graph):
"""Traverse all real nodes in the graph and collect their siblings and coparents"""
if cls.__verbosity > 0:
print("[make_sibling_coparent_groups]")
for node_name in fx_graph.keys():
fx_node = fx_graph[node_name]
if 'is_real' in fx_node.keys() and fx_node['is_real']:
sibling_group_id = fx_node['sibling_group_id']
if sibling_group_id is None: # need to make a new sibling group for this node
all_siblings = cls.collect_siblings(fx_graph, node_name, set([node_name]))
all_siblings = sorted(all_siblings) # deterministic order for DDP setups
sibling_group_id = cls.__group_data['next_sibling_group_id']
cls.__group_data['sibling_groups'][sibling_group_id] = all_siblings
cls.__group_data['next_sibling_group_id'] = sibling_group_id + 1
sibling_group_C_params = []
for sibling in all_siblings:
fx_graph[sibling]['sibling_group_id'] = sibling_group_id
sibling_C_param = int(fx_graph[sibling]['C_param'])
sibling_group_C_params.append(sibling_C_param)
# if grouped convolutions give siblings differently sized input channel counts,
# restrict the permutation atom to the greatest common divisor so it can be tiled as needed for each sibling (and parent)
sibling_group_C_param = str(np.gcd.reduce(sibling_group_C_params))
cls.__group_data['sibling_group_C_params'][sibling_group_id] = sibling_group_C_param
cls.__group_data['skipped_sibling_groups'][sibling_group_id] = None
if cls.__verbosity > 1:
print(f"New sibling group {sibling_group_id} with GCD(C) of {sibling_group_C_param}: {all_siblings}")
coparent_group_id = fx_node['coparent_group_id']
if coparent_group_id is None:
all_coparents = cls.collect_coparents(fx_graph, node_name, set([node_name]))
coparent_group_id = cls.__group_data['next_coparent_group_id']
cls.__group_data['coparent_groups'][coparent_group_id] = all_coparents
cls.__group_data['next_coparent_group_id'] = coparent_group_id + 1
cls.__group_data['skipped_coparent_groups'][coparent_group_id] = None
for coparent in all_coparents:
fx_graph[coparent]['coparent_group_id'] = coparent_group_id
if cls.__verbosity > 1:
print(f"New coparent group {coparent_group_id}: {all_coparents}")
return fx_graph
@classmethod
def propagate_permutation_flags(cls, fx_graph):
"""Disallow sibling groups from having different C_permutable flags and coparent groups from having different K_permutable flags within the groups"""
made_change = True # will we need to repeat this propagation?
# TODO: just propagate to sibling groups and coparent groups directly, instead of iteratively to direct real_parents and siblings
while made_change:
made_change = False
if cls.__verbosity > 0:
print("Making a pass at propagating permutation flags")
for node_name in fx_graph.keys():
fx_node = fx_graph.get(node_name)
node_parents = fx_graph.get(node_name).get('parents')
node_real_parents = fx_graph.get(node_name).get('real_parents')
node_children = fx_graph.get(node_name).get('children')
node_real_children = fx_graph.get(node_name).get('real_children')
# input layers can't be permuted along C without a runtime fixup, skip them
if node_parents is None or ('x' in node_parents and 'C_permutable' in fx_graph[node_name].keys() and fx_graph[node_name]['C_permutable']):
if cls.__verbosity > 1:
print(f"{node_name} has no parents, or only an input, disabling permutations in C")
made_change = True
fx_graph[node_name]['C_permutable'] = False
# output layers can't be permuted along K without a runtime fixup, skip them
if node_children is None or ('output' in node_children and 'K_permutable' in fx_graph[node_name].keys() and fx_graph[node_name]['K_permutable']):
if cls.__verbosity > 1:
print(f"{node_name} has no children, or only an output, disabling permutations in K")
made_change = True
fx_graph[node_name]['K_permutable'] = False
fx_graph[node_name]['K_passthru'] = False
if 'is_real' in fx_node.keys() and fx_node['is_real']:
# siblings must share C-flags; if one cannot be permuted along C, none can
sibling_group_id = fx_graph[node_name]['sibling_group_id']
all_siblings = cls.__group_data['sibling_groups'][sibling_group_id]
made_change = cls.propagate_sibling_group(fx_graph, all_siblings, cls.__verbosity) or made_change
# coparents must share K-flags; if one cannot be permuted along K, none can
coparent_group_id = fx_graph[node_name]['coparent_group_id']
all_coparents = cls.__group_data['coparent_groups'][coparent_group_id]
made_change = cls.propagate_coparent_group(fx_graph, all_coparents, cls.__verbosity) or made_change
return fx_graph
@classmethod
def find_node_real_children(cls, fx_graph, node_name, found_children):
"""Collect the real children of some node"""
if 'real_children' in fx_graph[node_name].keys():
return found_children.union(fx_graph[node_name]['real_children'])
children = fx_graph[node_name]['children']
for child in children:
if child in fx_graph.keys(): # not the output node
if cls.__verbosity > 3:
print(f"\tchecking child {child} of node {node_name}")
# if it's a real node, just add it
if 'is_real' in fx_graph[child].keys() and fx_graph[child]['is_real']:
found_children.add(child)
else: # otherwise, search its children
found_children = cls.find_node_real_children(fx_graph, child, found_children)
return found_children
@classmethod
def find_real_children(cls, fx_graph):
"""Collect the real children of all nodes in the graph"""
if cls.__verbosity > 0:
print("\n[find_real_children] Find the real children for each node according to the whole network graph built with Torch.FX")
reversible_fx_graph_keys = list(fx_graph.keys())
for node_name in reversed(reversible_fx_graph_keys): # as an optimization, find the real children from back to front so already-computed 'real_children' entries can be reused
node_children = fx_graph.get(node_name).get('children')
if cls.__verbosity > 2:
print("[find_real_children] node_name: \'{:}\', children: {:}".format(node_name, node_children))
real_children = cls.find_node_real_children(fx_graph, node_name, set())
if cls.__verbosity > 1:
print(f"[find_real_children] {node_name} has {len(real_children)} real children: {real_children}")
fx_graph[node_name]['real_children'] = sorted(real_children)
if cls.__save_permutation_graph:
cls.save_graph_to_json(fx_graph, save_dumped_graph_path_with_name=os.path.join(cls.__permutation_output_dir, './model_graph_find_real_children.json')) # save the intermediate graph as JSON file for debugging
return fx_graph
@classmethod
def find_node_real_parents(cls, fx_graph, node_name, found_parents):
"""Collect the real parents of some node"""
if 'real_parents' in fx_graph[node_name].keys():
return found_parents.union(fx_graph[node_name]['real_parents'])
parents = fx_graph[node_name]['parents']
for parent in parents:
if parent in fx_graph.keys(): # not the input node
if cls.__verbosity > 3:
print(f"\tchecking parent {parent} of node {node_name}")
# if it's a real node, just add it
if 'is_real' in fx_graph[parent].keys() and fx_graph[parent]['is_real']:
found_parents.add(parent)
else: # otherwise, search its parents
found_parents = cls.find_node_real_parents(fx_graph, parent, found_parents)
return found_parents
@classmethod
def find_real_parents(cls, fx_graph):
"""Collect the real parents of all nodes in the graph"""
if cls.__verbosity > 0:
print("\n[find_real_parents] Find the real parents for each node according to the whole network graph built with Torch.FX")
for node_name in fx_graph.keys():
node_real_parents_name = []
node_real_parents_module_type = []
real_parents = cls.find_node_real_parents(fx_graph, node_name, set())
if cls.__verbosity > 1:
print(f"[find_real_parents] {node_name} has {len(real_parents)} real parents: {real_parents}")
fx_graph[node_name]['real_parents'] = sorted(real_parents)
if cls.__save_permutation_graph:
cls.save_graph_to_json(fx_graph, save_dumped_graph_path_with_name=os.path.join(cls.__permutation_output_dir, './model_graph_find_real_parent.json')) # save the intermediate graph as JSON file for debugging
return fx_graph
@classmethod
def build_fx_graph(cls, model, dump_fx_graph=False, save_dumped_fx_graph='./model_fx_graph.json'):
"""Build the whole network graph with Torch.FX."""
network_fx_graph = {}
success = True
torch_version = str(torch.__version__)
torch_version_major = int(torch_version.split('.')[0])
torch_version_minor = int(torch_version.split('.')[1])
try:
torch_version_minimum = int(torch_version.split('.')[2])
except ValueError: # support non-standard version strings
torch_version_minimum = torch_version.split('.')[2]
if cls.__verbosity > 2:
print("[build_fx_graph] The torch version is: {}, version major is: {}, version minor is: {}, version minimum is: {}".format(torch_version, torch_version_major, torch_version_minor, torch_version_minimum))
if torch_version_major >= 2 or (torch_version_major >= 1 and torch_version_minor >= 8):
if cls.__verbosity > 1:
print("[build_fx_graph] The Torch.FX is supported.")
else: # Torch.FX is introduced in torch 1.8.0
if cls.__verbosity >= 0:
print("[build_fx_graph] The Torch.FX is not supported. So cannot build the Torch.FX graph.")
success = False
return network_fx_graph, success
if cls.__verbosity > 2:
print("\n[build_fx_graph] Print the model structure with pure PyTorch function")
print(model)
graph_module = cls.trace_and_print_raw_fx_graph(model, print_tabular=cls.__verbosity > 1) # needs "tabulate" library
if graph_module is None:
success = False
return network_fx_graph, success
if cls.__verbosity > 0:
print("\n[build_fx_graph] Build the module name and type dictionary")
module_name_type_dict = {}
module_name_group_conv_dict = {}
module_name_C_dict = {}
module_name_K_dict = {}
for name, mod in model.named_modules():
if cls.__verbosity > 1:
print("[build_fx_graph] module_name: {}, module type: {}".format(name, type(mod)))
module_name_type_dict[name] = str(type(mod)).split("\'")[1]
try:
module_name_C_dict[name] = str(mod.in_channels)
except:
try:
module_name_C_dict[name] = str(mod.in_features)
except:
try:
module_name_C_dict[name] = str(mod.embed_dim)
except:
module_name_C_dict[name] = 'None'
try:
module_name_K_dict[name] = str(mod.out_channels)
except:
try:
module_name_K_dict[name] = str(mod.out_features)
except:
try:
module_name_K_dict[name] = str(mod.embed_dim)
except:
module_name_K_dict[name] = 'None'
try:
module_name_group_conv_dict[name] = str(mod.groups)
if cls.__verbosity > 1:
print("[build_fx_graph] this module has \'group\' param with value: {}".format(mod.groups))
except:
module_name_group_conv_dict[name] = 'None'
continue
# keep track of children and parents for each layer (could be call_module or call_function)
if cls.__verbosity > 0:
print("\n[build_fx_graph] Print the children and parents relationship for each layer")
network_fx_graph = {}
for node in graph_module.graph.nodes:
if node.op == 'placeholder':
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'input\' node: {:}".format(node.target))
continue
elif node.op == 'get_attr':
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'get_attr\' node: {:}".format(node.target))
node_parent, node_children = get_node_parent_children(node)
converted_node_name=convert_fx_node_name(node.target)
network_fx_graph[converted_node_name] = {}
network_fx_graph[converted_node_name]['parents'] = node_parent
network_fx_graph[converted_node_name]['children'] = node_children
network_fx_graph[converted_node_name]['module_type'] = 'get_attr'
network_fx_graph[converted_node_name]['groups_param'] = 'None'
# inspired by https://pytorch.org/docs/stable/fx.html
def fetch_attr(target : str, mod):
target_atoms = target.split('.')
attr_itr = mod
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
raise RuntimeError(f"Node referenced nonexistant target {'.'.join(target_atoms[:i])}")
attr_itr = getattr(attr_itr, atom)
return attr_itr
attr = fetch_attr(node.target, graph_module)
network_fx_graph[converted_node_name]['C_param'] = 1
network_fx_graph[converted_node_name]['K_param'] = -1
network_fx_graph[converted_node_name]['attr'] = attr
elif node.op == 'call_function': # e.g. 'adaptive.avg.pool2d', 'add', 'cat', 'flatten', 'floordiv', 'getattr', 'getitem', 'hardsigmoid', 'mean', 'mul', 'relu', 'transpose'
node_parent, node_children = get_node_parent_children(node)
converted_node_name=convert_fx_node_name(node.name)
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'call_function\' node: {:}, its parent list: {:}, its children list: {:}".format(converted_node_name, node_parent, node_children))
network_fx_graph[converted_node_name] = {}
network_fx_graph[converted_node_name]['parents'] = node_parent
network_fx_graph[converted_node_name]['children'] = node_children
network_fx_graph[converted_node_name]['fx_op'] = 'call_function'
### "convert" some ops to modules
# concatenating along K can be handled by reducing the size of the children's C appropriately
# see fixup_concats, if no dim arg, default is 0 (handled automatically)
if node.target == torch.cat and len(node.args) > 1 and node.args[1] == 1:
network_fx_graph[converted_node_name]['fx_op'] = 'call_module'
network_fx_graph[converted_node_name]['module_type'] = 'concat'
network_fx_graph[converted_node_name]['groups_param'] = 'N/A' # just need placeholders
network_fx_graph[converted_node_name]['C_param'] = 'N/A'
network_fx_graph[converted_node_name]['K_param'] = 'N/A'
elif node.op == 'call_method': # e.g. 'chunk', 'contiguous', 'mean', 'size', 'unsqueeze', 'view'
node_parent, node_children = get_node_parent_children(node)
converted_node_name=convert_fx_node_name(node.name)
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'call_method\' node: {:}, its parent list: {:}, its children list: {:}".format(converted_node_name, node_parent, node_children))
network_fx_graph[converted_node_name] = {}
network_fx_graph[converted_node_name]['parents'] = node_parent
network_fx_graph[converted_node_name]['children'] = node_children
network_fx_graph[converted_node_name]['fx_op'] = 'call_method'
continue
elif node.op == 'call_module':
node_parent, node_children = get_node_parent_children(node)
converted_node_name=convert_fx_node_name(node.name)
# check whether the converted_node_name is same as node.target, especially for ReLU case
if converted_node_name != node.target:
if cls.__verbosity > 2:
print("[build_fx_graph][warning] The target name from Torch.FX is \'{:}\', the manually converted node name is \'{:}\', not the same one, choose the converted node name".format(node.target, converted_node_name))
# assume modules that share the same target name have the same type, because converted_node_name may not be obtainable from model.named_modules(), e.g. a ReLU defined in the forward function
node_type = module_name_type_dict[node.target]
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'call_module\' node: {:}, its parent list: {:}, its children list: {:}, its type: {:}".format(converted_node_name, node_parent, node_children, node_type))
network_fx_graph[converted_node_name] = {}
network_fx_graph[converted_node_name]['parents'] = node_parent
network_fx_graph[converted_node_name]['children'] = node_children
network_fx_graph[converted_node_name]['fx_op'] = 'call_module'
network_fx_graph[converted_node_name]['module_type'] = node_type
network_fx_graph[converted_node_name]['groups_param'] = module_name_group_conv_dict[node.target]
network_fx_graph[converted_node_name]['C_param'] = module_name_C_dict[node.target]
network_fx_graph[converted_node_name]['K_param'] = module_name_K_dict[node.target]
elif node.op == 'output':
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'output\' node: {:}".format(node.target))
continue
if dump_fx_graph:
if cls.__verbosity > 0:
print("\n[build_fx_graph] Dump the overall dict for children and parents relationship into JSON file")
cls.save_graph_to_json(network_fx_graph, save_dumped_graph_path_with_name=save_dumped_fx_graph)
return network_fx_graph, success
@classmethod
def trace_and_print_raw_fx_graph(cls, model, print_tabular=False, generate_python_code=False):
"""This function is used to find and print the intermediate representation (IR) - Graph representation with Torch.FX features."""
from torch.fx import symbolic_trace
import traceback
# Symbolic tracing frontend - captures the semantics of the module
try:
symbolic_traced : torch.fx.GraphModule = symbolic_trace(model)
except Exception as ex:
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
if cls.__verbosity > 0:
print(ex)
print(''.join(traceback.format_exception(etype=type(ex), value=ex, tb=ex.__traceback__)))
print("\n[print_raw_fx_graph] Meet the fatal fault when trying to symbolic trace the model with Torch.FX")
return None
# High-level intermediate representation (IR) - Graph representation
if cls.__verbosity > 1:
print("\n[print_raw_fx_graph] Print the intermediate representation (IR) with Torch.FX")
print(symbolic_traced.graph)
if print_tabular:
print("\n[print_raw_fx_graph] Print the intermediate representation (IR) with Torch.FX in a table format")
try:
from tabulate import tabulate
symbolic_traced.graph.print_tabular()
except ImportError:
if cls.__verbosity > 1:
print("[print_raw_fx_graph][Warning] \'print_tabular\' relies on the library `tabulate`; run `pip install tabulate` to install it.")
except AttributeError: # to avoid the AttributeError: 'Graph' object has no attribute 'print_tabular'
if cls.__verbosity > 1:
print("[print_raw_fx_graph][Warning] \'print_tabular\' function is not supported in current Torch version. Skip!")
# Code generation - valid Python code
if generate_python_code:
print("\n[print_raw_fx_graph] Create valid Python code matching the IR/Graph's semantics with Torch.FX")
print(symbolic_traced.code)
return symbolic_traced
@classmethod
def save_graph_to_json(cls, graph, save_dumped_graph_path_with_name='./model_fx_graph.json'):
"""This function is used to save the graph into JSON file for inspection."""
# use dumps to transfer the dict to JSON string
json_graph_str = json.dumps(graph)
with open(save_dumped_graph_path_with_name, 'w', encoding='utf-8') as dumped_graph_file:
dumped_graph_file.write(json_graph_str) # write the transferred JSON string into JSON file
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/permutation_lib.py |
import sys
import torch
import numpy as np
import collections
from itertools import permutations
""" compute density (helper fn to compute % NNZs in a tensor) """
def fill(x):
return float(x.nonzero().size(0))/torch.numel(x)
""" reshape matrix into m-dimensional vectors: (h,w) -> (hw/m, m) """
def reshape_1d(matrix, m):
# If not a nice multiple of m, fill with zeroes.
if matrix.shape[1] % m > 0:
mat = torch.cuda.FloatTensor(matrix.shape[0], matrix.shape[1] + (m-matrix.shape[1]%m)).fill_(0)
mat[:, :matrix.shape[1]] = matrix
shape = mat.shape
return mat.view(-1,m),shape
else:
return matrix.view(-1,m), matrix.shape
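# Example (illustrative): for a (3, 10) matrix and m=4, reshape_1d zero-pads to (3, 12) and
# returns a (9, 4) view plus the padded shape (3, 12); a (3, 8) matrix is returned as a (6, 4)
# view with its original shape.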
""" return all possible m:n patterns in a 1d vector """
valid_m4n2_1d_patterns = None
def compute_valid_1d_patterns(m,n):
# Early exit if patterns was already created.
global valid_m4n2_1d_patterns
if m==4 and n==2 and valid_m4n2_1d_patterns is not None: return valid_m4n2_1d_patterns
patterns = torch.zeros(m)
patterns[:n] = 1
valid_patterns = torch.tensor(list(set(permutations(patterns.tolist()))))
if m == 4 and n == 2: valid_m4n2_1d_patterns = valid_patterns
return valid_patterns
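# Example (illustrative): compute_valid_1d_patterns(4, 2) yields the C(4,2) = 6 binary patterns
# with exactly two ones, e.g. [1,1,0,0], [1,0,1,0], [1,0,0,1], [0,1,1,0], [0,1,0,1], [0,0,1,1]
# (row order is arbitrary because the patterns come from a set()).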
""" m:n 1d structured best """
def mn_1d_best(matrix, m, n):
# Find all possible patterns.
patterns = compute_valid_1d_patterns(m,n).cuda()
# Find the best m:n pattern (sum of non-masked weights).
mask = torch.cuda.IntTensor(matrix.shape).fill_(1).view(-1,m)
mat,shape = reshape_1d(matrix,m)
pmax = torch.argmax(torch.matmul(mat.abs(),patterns.t()), dim=1)
mask[:] = patterns[pmax[:]]
mask = mask.view(matrix.shape)
return mask
def m4n2_1d(mat, density):
return mn_1d_best(mat, 4, 2)
"""
Below 2d-masking related code is targeted more for training (from scratch).
2d-pruning of a weight tensor is done to accelerate DGRAD step during backprop
phase of training algorithm. Acceleration comes from using SpMMA instructions in
Tensor Cores of NVIDIA Ampere GPU Architecture
(note: this code does not do the acceleration, GPU kernels are required for this).
1d pruning of weight tensor helps speed up FPROP step by pruning in 2:4 pattern
along the horizontal (logical) direction.
During the DGRAD step, the weight tensor is transposed. The 2d pruning functions below mask
the weight tensor such that its transposed version is also 2:4 sparse along the
horizontal (logical) direction. Thus, with 2d pruning, weight tensors are
2:4 sparse along both the row and column directions.
"""
""" m:n 2d structured pruning: greedy method to select mask """
def mn_2d_greedy(matrix, m, n):
# Convert to numpy
mat = matrix.cpu().detach().numpy()
mask = np.ones(mat.shape, dtype=int)
rowCount = int(mat.shape[0]/m) * m
colCount = int(mat.shape[1]/m) * m
for rowStartIdx in range(0, rowCount, m):
rowEndIdx = rowStartIdx + m
for colStartIdx in range(0, colCount, m):
colEndIdx = colStartIdx + m
matrixSub = np.absolute(np.squeeze(mat[rowStartIdx:rowEndIdx, colStartIdx:colEndIdx]))
maskSub = np.squeeze(mask[rowStartIdx:rowEndIdx, colStartIdx:colEndIdx])
maskSub.fill(0.0)
matrixVecView = matrixSub.reshape(-1)
maskVecView = maskSub.reshape(-1)
linearIdx = np.argsort(matrixVecView)
matrixIdx = [(int(x/m), x % m) for x in linearIdx]
rowCounter = collections.Counter()
colCounter = collections.Counter()
for currIdx in range(len(linearIdx) - 1, -1, -1):
currMatrixEntry = matrixIdx[currIdx]
if (rowCounter[currMatrixEntry[0]] == n) or (colCounter[currMatrixEntry[1]] == n):
continue
#end if
maskSub[currMatrixEntry[0], currMatrixEntry[1]] = 1.0
rowCounter[currMatrixEntry[0]] += 1
colCounter[currMatrixEntry[1]] += 1
return torch.tensor(mask).cuda() # mask is a numpy array; convert it to a tensor before moving it to the GPU
def m4n2_2d_greedy(mat, density):
return mn_2d_greedy(mat, 4, 2)
""" return all possible m:n patterns in a mxn block. """
valid_m4n2_2d_patterns = None
def compute_valid_2d_patterns(m,n):
# Early exit if patterns was already created.
global valid_m4n2_2d_patterns
if valid_m4n2_2d_patterns is not None: return valid_m4n2_2d_patterns
patterns = torch.zeros(m)
patterns[:n] = 1
patterns = list(set(permutations(patterns.tolist())))
patterns = patterns + patterns
patterns = torch.tensor(list(set(permutations(patterns,m)))) # candidate (m x m) blocks as a (num_patterns, m, m) tensor
valid = ((patterns.sum(dim=1) <= n).sum(dim=1) == m).nonzero().view(-1)
valid_patterns = torch.empty(valid.shape[0],m,m)
valid_patterns[:] = patterns[valid[:]]
if m == 4 and n == 2: valid_m4n2_2d_patterns = valid_patterns
return valid_patterns
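# NOTE (assumption, not part of this excerpt): mn_2d_best below calls reshape_2d / reshape_2d_inv,
# which are not defined in this file as dumped. The following is a minimal sketch consistent with
# how they are used (tile an (R, C) matrix into row-major-flattened m x n blocks and undo that
# tiling); it may differ from the original helpers.
def reshape_2d(matrix, m, n):
    # zero-pad so both dimensions are multiples of the block size
    rows, cols = matrix.shape
    padded_rows = ((rows + m - 1) // m) * m
    padded_cols = ((cols + n - 1) // n) * n
    mat = matrix.new_zeros(padded_rows, padded_cols)
    mat[:rows, :cols] = matrix
    # (R/m, C/n, m*n): each entry is one m x n block, flattened in row-major order
    blocks = mat.view(padded_rows // m, m, padded_cols // n, n).permute(0, 2, 1, 3)
    return blocks.reshape(padded_rows // m, padded_cols // n, m * n)
def reshape_2d_inv(blocks):
    # (R/m, C/n, m, n) -> (R, C): scatter each block back to its original position
    block_rows, block_cols, m, n = blocks.shape
    return blocks.permute(0, 2, 1, 3).reshape(block_rows * m, block_cols * n)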
""" m:n 2d structured pruning: exhaustive method to select best mask """
def mn_2d_best(matrix, m, n):
# Find all possible patterns.
patterns = compute_valid_2d_patterns(m,n).cuda()
# Find the best m:n pattern (sum of non-masked weights).
mask = torch.cuda.IntTensor(matrix.shape).fill_(1)
mat = reshape_2d(matrix,m,m).abs()
pmax = torch.argmax(torch.matmul(mat,patterns.view(patterns.shape[0],m*m).t()), dim=2)
# Copy best m:n patterns into mask.
mat = mat.view(mat.shape[0]*mat.shape[1],-1)
pmax = pmax.view(pmax.shape[0]*pmax.shape[1]).unsqueeze(1).expand(-1,mat.shape[1])
patterns = patterns.view(patterns.shape[0],patterns.shape[1]*patterns.shape[2])
mat = torch.gather(patterns,0,pmax)
mat = reshape_2d_inv(mat.view(matrix.shape[0]//m,matrix.shape[1]//m,m,m))
mask.copy_(mat.type(mask.type()))
return mask
def m4n2_2d_best(mat, density):
return mn_2d_best(mat, 4, 2)
""" returns a sparse mask """
def create_mask(tensor, pattern="m4n2_1d", density=0.5):
# Reshape tensor and mask.
shape = tensor.shape
ttype = tensor.type()
t = tensor.float().contiguous()
# 1d-tensor
if len(shape) == 1:
t = t.view(1, shape[0])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
return mask.view(shape).type(ttype)
# 2d-tensor (K, C)
elif len(shape) == 2:
# linear
t = t.view(shape[0], shape[1])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
return mask.view(shape).type(ttype)
# 3d-tensor (K, C, R)
elif len(shape) == 3:
# 1d convs
t = t.permute(0,2,1).contiguous().view(shape[0]*shape[2], shape[1])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
mask = mask.view(shape[0], shape[2], shape[1]).permute(0,2,1).contiguous()
return mask.view(shape).type(ttype)
# 4d-tensor (K, C, R, S)
elif len(shape) == 4:
"""
# transformers (bmm)
t = t.view(shape[0]*shape[1]*shape[2], shape[3])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
return mask.view(shape).type(ttype)
"""
# 2d convs
t = t.permute(2,3,0,1).contiguous().view(shape[2]*shape[3]*shape[0], shape[1])
func = getattr(sys.modules[__name__], pattern, None)
mask = func(t, density)
mask = mask.view(shape[2], shape[3], shape[0], shape[1]).permute(2,3,0,1).contiguous()
return mask.view(shape).type(ttype)
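# Example usage (illustrative, not part of the original source; names are placeholders):
#   weight = module.weight                               # e.g. a (K, C, R, S) conv weight on the GPU
#   mask = create_mask(weight, pattern="m4n2_1d")        # 2:4 mask along the input-channel dimension
#   module.weight.data *= mask                           # zero out 2 of every 4 weights in each group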
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/sparse_masklib.py |
from .permutation_utilities import *
################################################################################################################
# Exhaustive
# Try them all
# - order of columns within a group doesn't matter
# - order of groups doesn't matter
# - we can eliminate effective duplicates by defining a unique combination to be a sorted list of sorted groups
################################################################################################################
####################################################################
# generate unique permutations
####################################################################
# check if adding a column index to a current permutation would keep it in canonical form
# assumes that perm is in canonical form already!
def is_canonical(perm, col):
# if it's a new group
if len(perm) % 4 == 0:
# every column ID < col needs to be in the permutation already
for val in range(col):
if val not in perm:
return False
# this new group needs to be sorted w.r.t. the previous group
return col > perm[-4]
# not a new group, just check to see if it will still be sorted
return col > perm[-1]
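# Example (illustrative): with a group width of 4, [0, 2, 5, 7, 1, 3, 4, 6] is canonical (each
# group of 4 is sorted, and each new group starts with a column larger than the previous group's
# first column with all smaller column ids already placed), while reorderings such as
# [0, 2, 7, 5, ...] or [1, 3, 4, 6, 0, 2, 5, 7] are duplicates and are never generated.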
# recursive: build a unique permutation one column index at a time
def generate_unique_combinations(built_permutation, remaining_columns, full_permutation_list, group_width):
# base case: nothing else to add
if len(remaining_columns) == 0:
full_permutation_list.append(np.copy(built_permutation))
if len(full_permutation_list) % 1000000 == 0:
print(f"{len(full_permutation_list)} unique permutations found so far")
# still more choices to make, so add each remaining column in turn if it keeps everything sorted
else:
for c in range(len(remaining_columns)):
# to satisfy our immutables (values within groups are sorted, groups are globally sorted),
# only add this column if either:
# it's starting a new group and is larger than the previous group's first entry
# OR
# it's larger than the last value in the built_permutation
col_to_add = remaining_columns[c]
if is_canonical(built_permutation, col_to_add):
# add the column to the running permutation, remove it from remaining columns
built_permutation.append(col_to_add)
remaining_columns.pop(c)
# recurse
generate_unique_combinations(built_permutation, remaining_columns, full_permutation_list, group_width)
# remove the most recent column and put it back on the remaining column list where we found it (sorted)
remaining_columns.insert(c, built_permutation.pop(-1))
import pickle
import os.path
from os import path
master_unique_permutation_list = {}
def generate_all_unique_combinations(C, M, must_use_all_groups = False):
global master_unique_permutation_list
if len(master_unique_permutation_list) == 0 and path.exists("master_list.pkl"):
with open("master_list.pkl","rb") as cache:
master_unique_permutation_list = pickle.load(cache)
if (C,M) not in master_unique_permutation_list:
full_permutation_list = []
generate_unique_combinations([0], [c for c in range(1,C)], full_permutation_list, M)
master_unique_permutation_list[(C,M)] = full_permutation_list
with open("master_list.pkl", "wb") as cache:
pickle.dump(master_unique_permutation_list, cache)
unique_permutations = master_unique_permutation_list[(C,M)]
return unique_permutations
# analytical solution
import math
def predict_unique_combinations(C, M):
assert(C%M==0)
G = int(C/M)
return int(int(math.factorial(C)) / (int(math.pow(math.factorial(M),G)) * math.factorial(G)))
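# Worked example (illustrative, not part of the original source): C=8 columns with M=4 gives
# G=2 groups and 8! / ((4!)^2 * 2!) = 40320 / (576 * 2) = 35 unique groupings; C=16 already
# gives 2,627,625, and the count quickly passes the 1e10 cutoff used by search_matrix() below.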
#################################################################
# exhaustively try all unique permutations
#################################################################
# exhaustively search the entire matrix
def search_matrix(matrix, group_width):
# give up quickly if we'd go on forever
prediction = predict_unique_combinations(matrix.shape[1], group_width)
best_permutation = [c for c in range(matrix.shape[1])]
if prediction > 1e10:
print(f"There are {prediction} unique combinations with {matrix.shape[1]} columns and a group width of {group_width}, not searching.")
return matrix, prediction, best_permutation, 0. # also return a zero improvement so callers that unpack four values do not break
start_time = time.perf_counter()
full_permutation_list = generate_all_unique_combinations(matrix.shape[1], group_width)
# found them, now try them
best_improvement = 0.0
use_cuda = use_gpu()
if use_cuda and matrix.shape[1] >= 8 and group_width == 4: # CUDA path only works for a group width of 4
best_improvement, best_permutation = try_permutations_on_matrix(matrix, full_permutation_list)
else:
base_sum = sum_after_2_to_4(matrix)
for i in range(1,len(full_permutation_list)):
permutation = full_permutation_list[i]
permuted = matrix[:, permutation]
cur_improvement = sum_after_2_to_4(permuted) - base_sum
if (cur_improvement > best_improvement):
best_improvement = cur_improvement
best_permutation = permutation
seconds = time.perf_counter() - start_time
return matrix[:, best_permutation], seconds, best_permutation, best_improvement
#############
# Stripe group handling
#############
# gather stripes from a larger matrix into a single matrix
def collect_stripes(matrix, stripes, group_width):
subset = np.zeros((matrix.shape[0], len(stripes)*group_width))
for s,stripe in enumerate(stripes):
subset[...,s*group_width:s*group_width+group_width] = matrix[...,stripe*group_width:stripe*group_width+group_width]
return subset
# apply the stripe group permutation to the entire permutation
def apply_stripe_group_permutation(sgp, stripes, group_width, permutation):
new_permutation = permutation.copy()
for subset_idx in range(len(sgp)):
dst_stripe_idx = stripes[int(subset_idx / group_width)]
dst_col_idx = subset_idx % group_width
subset_val = sgp[subset_idx]
src_stripe_idx = stripes[int(subset_val / group_width)]
src_col_idx = subset_val % group_width
new_permutation[dst_stripe_idx*group_width + dst_col_idx] = permutation[src_stripe_idx*group_width + src_col_idx]
return new_permutation
# generate all possible stripe groups
def generate_stripe_groups(num_stripes, window_size):
stripe_array = [[c] for c in range(num_stripes)]
next_stripe_array = []
for w in range(1, window_size):
for g in range(len(stripe_array)):
start_c = stripe_array[g][w-1]+1
group = stripe_array[g]
for c in range(start_c, num_stripes):
new_group = group.copy()
new_group.append(c)
next_stripe_array.append(new_group)
stripe_array = next_stripe_array
next_stripe_array = []
return set(tuple(stripe_array[g]) for g in range(len(stripe_array)))
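# Example (illustrative): generate_stripe_groups(num_stripes=4, window_size=2) returns
# {(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)} - every sorted pair drawn from the 4 stripes.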
# It is not safe to rely on this module-level initialization alone:
# when Exhaustive_Search is called repeatedly from the end-to-end search, stripe_set would not be reset to None between calls.
stripe_set = None
stripe_set_config = None
# build the stripe map
def build_stripe_map(matrix, group_width, window_size, stripe_map, stripe_ids, perm_map, used_stripes):
global stripe_set, stripe_set_config
window_size = int(window_size / group_width)
if stripe_set is None or stripe_set_config is None or stripe_set_config != (group_width, window_size):
num_stripes = int(matrix.shape[1] / group_width)
assert(group_width * num_stripes == matrix.shape[1])
stripe_set = generate_stripe_groups(num_stripes, window_size)
stripe_set_config = (group_width, window_size)
# step through each, update the stripe_map/stripe_ids if necessary
updates = 0
use_cuda = use_gpu()
gpu_list = []
gpu_groups = []
for i,s in enumerate(stripe_set):
sg = [] # build the group of stripes, check if any members changed
need_update = i >= len(stripe_map)
for stripe in s:
sg.append(stripe)
if stripe in used_stripes:
need_update = True
# pre-populate if we're building fresh
if i >= len(stripe_map):
stripe_ids.append(sg)
stripe_map.append(0.)
perm_map.append([c for c in range(group_width * window_size)])
# update entries if needed (only stripe_map and perm_map)
if need_update:
updates += 1
if not use_cuda: # do the work here if using the CPU
subset = collect_stripes(matrix, sg, group_width)
sub_result, sub_duration, permutation, improvement = search_matrix(subset, group_width)
stripe_map[i] = improvement
perm_map[i] = permutation
else: # otherwise, just track the work needed to farm off to the GPU
gpu_groups.append(sg)
gpu_list.append(i)
if use_cuda: # if using the GPU, perform the work
matrix_view = np.copy(matrix).astype(np.float32).flatten()
all_permutations = generate_all_unique_combinations(window_size*group_width, group_width)
num_permutations = len(all_permutations)
permutation_view = np.copy(np.asarray(all_permutations)).astype(np.uint32).flatten()
stripe_groups_view = np.asarray(gpu_groups).astype(np.uint32).flatten()
num_gpu_groups = len(gpu_list)
gpu_improvement = np.zeros((num_gpu_groups), dtype=np.float32).flatten()
gpu_permutation = np.zeros((num_gpu_groups), dtype=np.uint32).flatten()
result = permutation_search_cuda_kernels.build_permute_map(matrix_view,
matrix.shape[0],
matrix.shape[1],
stripe_groups_view,
num_gpu_groups,
window_size,
permutation_view,
window_size * group_width,
gpu_improvement,
gpu_permutation)
# put the data where python expects it
for i in range(len(gpu_list)):
stripe_map[gpu_list[i]] = gpu_improvement[i]
perm_map[gpu_list[i]] = all_permutations[gpu_permutation[i]]
return stripe_map, stripe_ids, perm_map
# start performing stripe checks
sm_perturbations = 0
sm_perturbation_limit = 0
def use_stripe_map(matrix, group_width, stripe_map, stripe_ids, perm_map, permutation):
global sm_perturbations, sm_perturbation_limit
used_stripes = []
stripe_groups_optimized = 0
improvement = 0.0
# set the traversal order
ix = np.flip(np.argsort(stripe_map)) # small to large --> large to small
for i in range(len(ix)):
stripe_group_id = ix[i]
perm = perm_map[stripe_group_id].copy()
if stripe_map[stripe_group_id] <= np.finfo(np.float16).tiny*5.:
# perturbations
if len(used_stripes) == 0 and sm_perturbations < sm_perturbation_limit:
sm_perturbations += 1
# use this permutation, but swap two channels from left/right halves to include two stripes, no matter the group size
stripe_group_id = ix[np.random.randint(len(ix))]
perm = perm_map[stripe_group_id].copy()
# a little easier to escape from
src = np.random.randint(int(len(perm)/2))
dst = int(len(perm)/2) + np.random.randint(int(len(perm)/2))
perm[src],perm[dst] = perm[dst],perm[src]
else:
break
stripe_group = stripe_ids[stripe_group_id]
# don't work on stripes we've already touched
touched_stripe = False
for stripe in stripe_group:
if stripe in used_stripes:
touched_stripe = True
if touched_stripe:
continue
# apply the permutation we've already found to this stripe group
subset = collect_stripes(matrix, stripe_group, group_width)
sub_result = subset[...,perm]
permutation = apply_stripe_group_permutation(perm, stripe_group, group_width, permutation)
# scatter the results, track what changed
for s,stripe in enumerate(stripe_group):
# see if this group is in canonical form (entry 0 a multiple of 4, contiguous values)
group = perm[s*group_width:s*group_width+group_width] # columns in this group of the used permutation
changed = False
if group[0] % 4 != 0:
changed = True
for c in range(1,group_width):
if group[c] != group[c-1]+1:
changed = True
break
# if it's not, then it changed
if changed:
used_stripes.append(stripe_group[s])
matrix[...,stripe*group_width:stripe*group_width+group_width] = sub_result[...,s*group_width:s*group_width+group_width]
improvement += stripe_map[stripe_group_id]
stripe_groups_optimized += 1
return matrix, stripe_groups_optimized, stripe_map, stripe_ids, used_stripes, improvement, permutation
# entry point for exhaustive searches - both the entire matrix, as well as stripe groups
def Exhaustive_Search(matrix, stripe_group_size=-1, escape_attempts=0, permutation=None):
global sm_perturbation_limit, sm_perturbations
sm_perturbations = 0
sm_perturbation_limit = escape_attempts
if permutation is None:
permutation = [c for c in range(matrix.shape[1])]
# It is much safer to reset stripe_set to None at the entry point of Exhaustive_Search
global stripe_set, stripe_set_config
stripe_set = None
stripe_set_config = None
# only support N:4 for now
group_width = 4
result = np.copy(matrix)
# if the matrix is too large for a window size of 12, subdivide, then fix up with a global optimization with a window size of 8
if group_width==4 and stripe_group_size==12 and matrix.shape[1] > 512:
stripe_split = int(matrix.shape[1]/2/group_width)
col_split = stripe_split * group_width
result[:,:col_split], durationL, permutation[:col_split] = Exhaustive_Search(result[:,:col_split], stripe_group_size=stripe_group_size, escape_attempts=escape_attempts, permutation=permutation[:col_split])
result[:,col_split:], durationR, permutation[col_split:] = Exhaustive_Search(result[:,col_split:], stripe_group_size=stripe_group_size, escape_attempts=escape_attempts, permutation=permutation[col_split:])
escape_attempts = max(escape_attempts, 100)*10
result,duration,permutation = Exhaustive_Search(result, stripe_group_size=8, escape_attempts=escape_attempts, permutation=permutation)
return result, durationL+durationR+duration, permutation
# small enough to optimize the entire matrix at once
if stripe_group_size != -1 and stripe_group_size < matrix.shape[1]:
stripe_map = []
stripe_ids = []
perm_map = []
used_stripes = []
# in practice, this work will be cached ahead of time; doing it now.
# (Reading the cached list from disk can take several seconds, which shouldn't be counted against the search, but amortized over every layer in a network)
generate_all_unique_combinations(stripe_group_size, group_width)
start_time = time.perf_counter()
while True:
#print("[Debug][Exhaustive_Search] Before entering the build_stripe_map function.")
#print("[Debug][Exhaustive_Search] Now the stripe_set value is: {}".format(stripe_set))
stripe_map, stripe_ids, perm_map = build_stripe_map(result, group_width, stripe_group_size, stripe_map, stripe_ids, perm_map, used_stripes)
result, stripe_groups_optimized, stripe_map, stripe_ids, used_stripes, improvement, permutation = use_stripe_map(result, group_width, stripe_map, stripe_ids, perm_map, permutation)
# converged?
if len(used_stripes) == 0:
break
duration = time.perf_counter() - start_time
else: # no sliding window, single iteration
print(f"Matrix has {matrix.shape[1]} columns and the search window is only {stripe_group_size}: searching exhaustively")
result, duration, permutation, improvement = search_matrix(matrix, group_width)
return result, duration, permutation
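# A minimal usage sketch of the entry point above (illustrative only; the matrix shape and the
# stripe_group_size / escape_attempts values are hypothetical choices, not prescribed defaults):
def _exhaustive_search_usage_example():
    matrix = np.random.randn(64, 128).astype(np.float32)
    permuted, seconds, permutation = Exhaustive_Search(matrix, stripe_group_size=8, escape_attempts=100)
    # `permutation` records the column reordering applied to produce `permuted`
    return permuted, seconds, permutation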
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/permutation_search_kernels/exhaustive_search.py |
from .permutation_utilities import *
################################################################################################################
# Greedy Channel Swaps - iterative, deterministic, can be parallelized
# 1. Build a map of the magnitude improvement of involved stripes for all pairs of channel swaps
# 2. Sort the map, march through by decreasing improvement, skipping entries whose stripes have been modified
# 3. Repeat until there's no entry with positive improvement (convergence)
################################################################################################################
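## A minimal usage sketch of the greedy search described above (illustrative only; the matrix shape
## is hypothetical). With escape_attempts=0, only swaps with a positive magnitude improvement are
## accepted, so the magnitude surviving 2:4 pruning never decreases.
def _channel_swap_usage_example():
    dense = np.random.randn(64, 64).astype(np.float32)
    permuted, seconds, permutation = Channel_Swap(np.copy(dense), escape_attempts=0)
    # the columns of `permuted` are the columns of `dense` reordered by `permutation`
    return sum_after_2_to_4(permuted) >= sum_after_2_to_4(dense)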
## try swapping columns and tracking magnitude after pruning
def try_swap(matrix, dst, src):
src_base = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_base = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap
matrix[...,[src,dst]] = matrix[...,[dst,src]]
# check the Nx4 slices of the swapped columns
src_sum = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_sum = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap back
matrix[...,[src,dst]] = matrix[...,[dst,src]]
return src_sum + dst_sum, (src_sum + dst_sum) - (src_base + dst_base)
## convert a pair of stripe indices and a swap index to a pair of column indices
def stripes_and_swap_idx_to_columns(stripe0, stripe1, idx):
i = 0
for c0 in range(4):
for c1 in range(4):
if i == idx:
return stripe0*4+c0, stripe1*4+c1
i += 1
return None
## convert columns to stripe and swap indices
def columns_to_stripes_and_swap_idx(col0, col1):
stripe0 = int(col0/4)
col0 %= 4
stripe1 = int(col1/4)
col1 %= 4
idx = 0
for c0 in range(4):
for c1 in range(4):
if c0 == col0 and c1 == col1:
return stripe0, stripe1, idx
idx += 1
return None
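## Round-trip example (illustrative): columns (5, 9) live in stripes (1, 2) with swap index 1*4+1 = 5,
## and converting back recovers the original column pair.
def _stripe_swap_index_example():
    stripe0, stripe1, idx = columns_to_stripes_and_swap_idx(5, 9)  # (1, 2, 5)
    return stripes_and_swap_idx_to_columns(stripe0, stripe1, idx)  # (5, 9)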
## build a list of stripe pairs that need their benefits recomputed because one stripe was modified
def build_stripe_pairs(matrix, used_stripes):
stripe_pairs = []
total_stripes = int(matrix.shape[1]/4)
used_stripes = np.sort(used_stripes)
for stripe0 in range(total_stripes-1):
for stripe1 in range(stripe0, total_stripes):
if stripe0 in used_stripes or stripe1 in used_stripes:
stripe_pairs.append([stripe0,stripe1])
return np.asarray(stripe_pairs)
## compute the benefit of swapping each pair of columns in the matrix using the GPU
## only update stripes' columns that appear in used_stripes to avoid unnecessary computations
def compute_swap_map(matrix, used_stripes):
do_gpu = use_gpu()
assert(do_gpu)
stripe_pairs = build_stripe_pairs(matrix, used_stripes).astype(np.uint32)
matrix_view = matrix.astype(np.float32).flatten()
stripe_pairs_view = stripe_pairs.flatten()
output = np.zeros((len(stripe_pairs)*16), dtype=np.float32).flatten()
result = permutation_search_cuda_kernels.build_swap_map(matrix_view, matrix.shape[0], matrix.shape[1], stripe_pairs_view, output)
# translate the flat array from the GPU to a map
pair_improvement_map = {}
for i,pair in enumerate(stripe_pairs):
for swap_idx in range(16):
col0, col1 = stripes_and_swap_idx_to_columns(pair[0], pair[1], swap_idx)
pair_improvement_map[(col0, col1)] = output[i*16+swap_idx]
return pair_improvement_map
## build the full swap map
def build_swap_map(matrix, swap_map, swap_ids, used_stripes, verbosity):
improvements = None
# if we have a GPU and built kernels, pre-compute the needed values
do_gpu = use_gpu()
if do_gpu:
if len(swap_map) == 0:
used_stripes = [s for s in range(int(matrix.shape[1]/4))]
improvements = compute_swap_map(matrix, used_stripes)
idx = 0
updates = 0
for src in range(matrix.shape[1]-1): # parallelize these loops
for dst in range(src+1, matrix.shape[1]):
# swapping within a stripe does nothing
if int(src/4) == int(dst/4):
continue
# if we touched this stripe last time, update it
if (int(src/4) in used_stripes) or (int(dst/4) in used_stripes) or len(swap_map) <= idx:
tmp_improvement = 0.0
# use the pre-computed values from the GPU if possible, otherwise compute on the CPU
if do_gpu:
tmp_improvement = improvements[(src,dst)]
else:
tmp_mag, tmp_improvement = try_swap(matrix, src, dst)
updates += 1
if len(swap_map) <= idx:
swap_map.append(tmp_improvement)
swap_ids.append((src,dst))
else:
swap_map[idx] = tmp_improvement
swap_ids[idx] = (src,dst)
idx += 1
if verbosity > 15:
print(f"\tupdated {updates} map entries")
return swap_map, swap_ids
def use_swap_map(matrix, swap_map, swap_ids, threshold, used_escape_attempts, escape_attempts, permutation, verbosity):
used_stripes = []
swaps = 0
improvement = 0.0
# set the traversal order and threshold
ix = np.flip(np.argsort(swap_map)) # small to large -> large to small
threshold = min(max(swap_map[ix[0]] * threshold, 0.0001),1.0)
# iterate through the potential swaps in benefit order
for swap in range(len(ix)):
swap_id = ix[swap]
src = swap_ids[swap_id][0]
dst = swap_ids[swap_id][1]
# early-out of swaps that are below the threshold (don't be so greedy)
if swap_map[ix[swap]] < threshold:
# see if an arbitrary swap helps things if we've converged
if len(used_stripes) == 0 and used_escape_attempts < escape_attempts:
swap_id = np.random.randint(len(swap_ids))
if verbosity > 15:
print(F"converged, attempt #{used_escape_attempts+1} to jiggle out, using index {swap_id} into the sorted list={ix[swap_id]}")
swap_id = ix[swap_id]
src = swap_ids[swap_id][0]
dst = swap_ids[swap_id][1]
used_escape_attempts += 1
else:
break
# skip swaps that include a stripe we've already modified
if int(src/4) in used_stripes or int(dst/4) in used_stripes:
continue
# we'll need to update these stripes later
used_stripes.append(int(src/4))
used_stripes.append(int(dst/4))
# make the swap
if verbosity > 20:
print(F"\t{swap}\t{src},{dst} {swap_map[swap_id]:.4f}")
matrix[...,[src,dst]] = matrix[...,[dst,src]]
permutation[src],permutation[dst] = permutation[dst],permutation[src]
improvement += swap_map[swap_id]
swaps += 1
return matrix, swaps, swap_map, swap_ids, used_stripes, improvement, used_escape_attempts, permutation
def Channel_Swap(matrix, escape_attempts=0, verbosity=0, permutation=None):
threshold = 0.00001
used_escape_attempts = 0
# initialize
if permutation is None:
permutation = [c for c in range(matrix.shape[1])]
swap_map = []
swap_ids = []
used_stripes = []
swap_count = 0
iterations = 0
agg_improvement = 0.
cur_total_sum = sum_after_2_to_4(matrix)
start_time = time.perf_counter()
# do the work
swapped = 1 # just start with nonzero value to fall into the loop
while swapped > 0:
swap_map, swap_ids = build_swap_map(matrix, swap_map, swap_ids, used_stripes, verbosity)
matrix, swapped, swap_map, swap_ids, used_stripes, improvement, used_escape_attempts, permutation = use_swap_map(matrix, swap_map, swap_ids, threshold, used_escape_attempts, escape_attempts, permutation, verbosity)
agg_improvement += improvement
# keep track of statistics, print occasionally
swap_count += swapped
if verbosity > 10:
iterations += 1
cur_total_sum += agg_improvement
duration = time.perf_counter() - start_time
print(F"\t{iterations:8} {cur_total_sum:7.2f} {agg_improvement:7.2f} {swap_count:4} {agg_improvement/max(swap_count,1):5.2f} {duration:7.2f}")
agg_improvement = 0.
swap_count = 0
# final status
seconds = time.perf_counter() - start_time
return matrix, seconds, permutation
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/permutation_search_kernels/channel_swap.py |
import numpy as np
from .permutation_utilities import *
from .exhaustive_search import Exhaustive_Search
def accelerated_search_for_good_permutation(matrix_group, options=None, verbosity=0):
"""This function is used to call the permutation search CUDA kernels.
Users can select a preferred search strategy by providing a valid 'options' dictionary
(an illustrative sketch of such dictionaries follows this function), or implement their own
customized 'accelerated_search_for_good_permutation' function.
"""
input_matrix = matrix_group.cpu().detach().numpy()
if verbosity > 1:
print("\n[accelerated_search_for_good_permutation] input matrix shape: \'{:}\'.".format(input_matrix.shape))
result = np.copy(input_matrix)
# init a sequential permutation search sequence
input_channel_num = matrix_group.size(1)
permutation_sequence = [n for n in range(input_channel_num)]
duration = 0.0
if options is None:
options = {}
if 'strategy' not in options: # right now, the default permutation search strategy is: 'exhaustive' search
options['strategy'] = 'exhaustive'
if verbosity > 1:
print("[accelerated_search_for_good_permutation] the permutation strategy is: \'{:} search\'.".format(options['strategy']))
# define sub options for each search strategy
if options['strategy'] == 'exhaustive':
# right now, the default options for 'exhaustive' search are: 'exhaustive,8,100'
if 'stripe_group_size' not in options:
options['stripe_group_size'] = 8
if 'escape_attempts' not in options:
options['escape_attempts'] = 100
elif options['strategy'] == 'progressive channel swap':
# just swaps meaningful channels, keeping the good swaps, until the search time limit expires.
if 'progressive_search_time_limit' not in options:
options['progressive_search_time_limit'] = 60
if 'improvement_threshold' not in options:
options['improvement_threshold'] = 1e-9
# execute the requested strategy
if options['strategy'] == 'exhaustive':
result, duration, permutation_sequence = Exhaustive_Search(result, stripe_group_size=options['stripe_group_size'], escape_attempts=options['escape_attempts'])
elif options['strategy'] == 'progressive channel swap':
real_swap_num = 0
start_time = time.perf_counter()
while time.perf_counter() - start_time < options['progressive_search_time_limit']:
src = np.random.randint(result.shape[1])
dst = np.random.randint(result.shape[1])
src_group = int(src/4)
dst_group = int(dst/4)
if src_group == dst_group: # channel swapping within a stripe does nothing
continue
new_sum, improvement = try_swap(result, dst, src)
if improvement > options['improvement_threshold']:
result[...,[src,dst]] = result[...,[dst,src]]
permutation_sequence[src], permutation_sequence[dst] = permutation_sequence[dst], permutation_sequence[src]
real_swap_num += 1
duration = time.perf_counter() - start_time
if verbosity > 1:
print("\tFinally swap {} channel pairs until the search time limit expires.".format(real_swap_num))
elif options['strategy'] == 'user defined': # need to get the permuted matrix (result) by applying a customized permutation search function
if verbosity > 1:
print("[accelerated_search_for_good_permutation] Use the user customized permutation search function!")
else:
if verbosity >= 0:
print("[accelerated_search_for_good_permutation] Cannot find the implementation of the required strategy!")
if verbosity > 1:
print("[accelerated_search_for_good_permutation] Take {:.4f} seconds to search the permutation sequence.".format(duration))
return permutation_sequence
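# A hedged sketch of the 'options' dictionaries accepted above; the values shown are simply the
# defaults spelled out explicitly. One of these would be passed as the 'options' argument along
# with a 2D weight tensor.
def _example_search_options():
    exhaustive_options = {
        'strategy': 'exhaustive',
        'stripe_group_size': 8,
        'escape_attempts': 100,
    }
    progressive_options = {
        'strategy': 'progressive channel swap',
        'progressive_search_time_limit': 60,  # seconds
        'improvement_threshold': 1e-9,
    }
    return exhaustive_options, progressive_options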
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/permutation_search_kernels/call_permutation_search_kernels.py |
from .call_permutation_search_kernels import accelerated_search_for_good_permutation
from .permutation_utilities import sum_after_2_to_4 | GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/permutation_search_kernels/__init__.py |
import numpy as np
import time
import subprocess
import math
gpus_tested = False
gpus_found = 0
kernels_found = True
try:
import permutation_search_cuda as permutation_search_cuda_kernels
print(f"Found permutation search CUDA kernels")
except ImportError:
try:
from . import permutation_search_cuda as permutation_search_cuda_kernels
print(f"Found permutation search CUDA kernels for standalone testing")
except ImportError:
print(f"Could not find permutation search CUDA kernels, falling back to CPU path")
kernels_found = False
def use_gpu(initial_override = True):
global gpus_tested, gpus_found, kernels_found
if not gpus_tested:
if not initial_override:
gpus_tested = True
return False
try:
gpus_found = str(subprocess.check_output(["nvidia-smi", "-L"])).count('UUID')
print(f"Found {gpus_found} gpus")
except:
gpus_found = 0
print(f"Could not find nvidia-smi, please check your cuda installation")
gpus_tested = True
return gpus_found > 0 and kernels_found
##############################################################################################
# pruning utilities
##############################################################################################
## apply 2:4 to some matrix
def apply_2_to_4(matrix):
for row in range(matrix.shape[0]):
for col in range(0,matrix.shape[1],4):
ix = np.argsort(np.abs(matrix[row,col:col+4]))
matrix[row,col+ix[0]] = 0.0
matrix[row,col+ix[1]] = 0.0
return matrix
## find the sum of magnitudes if 2:4 were applied to a matrix
def sum_after_2_to_4(matrix):
cur_sum = 0.0
use_cuda = use_gpu()
if not use_cuda:
for row in range(matrix.shape[0]):
for col in range(0,matrix.shape[1],4):
ix = np.argsort(np.abs(matrix[row,col:col+4]))
cur_sum += abs(matrix[row,col+ix[2]])
cur_sum += abs(matrix[row,col+ix[3]])
else:
matrix = matrix.astype(np.float32)
cuda_sum = np.zeros((1), dtype=np.float32)
matrix_view = np.copy(matrix).flatten()
sum_view = cuda_sum.flatten()
blocks = max(int(matrix.shape[1]/4/2), 1)
threads = min(max(math.ceil(matrix.shape[0]/4), 1), 1024)
result = permutation_search_cuda_kernels.sum_after_2_to_4(matrix_view,
matrix.shape[0],
matrix.shape[1],
0,
matrix.shape[1],
blocks,
threads,
sum_view)
cur_sum = sum_view[0]
return cur_sum
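## Small worked example (hypothetical values): 2:4 keeps the two largest magnitudes in each group of
## four columns, so for the single row [0.1, -0.5, 0.2, 0.9] the surviving magnitude is 0.5 + 0.9 = 1.4.
def _sum_after_2_to_4_example():
    row = np.array([[0.1, -0.5, 0.2, 0.9]], dtype=np.float32)
    return sum_after_2_to_4(row)  # expected to be approximately 1.4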
# perform unstructured pruning on some matrix
def unstructured_prune(matrix, sparsity):
shp = matrix.shape
matrix = matrix.flatten()
ix = np.argsort(matrix)
ix = ix[:int(len(ix)*sparsity)]
matrix[ix] = 0.0
matrix = np.reshape(matrix, shp)
return matrix
## try swapping columns and tracking magnitude after pruning
def try_swap(matrix, dst, src):
src_base = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_base = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap
matrix[...,[src,dst]] = matrix[...,[dst,src]]
# check the Nx4 slices of the swapped columns
src_sum = sum_after_2_to_4(matrix[...,int(src/4)*4:int(src/4)*4+4])
dst_sum = sum_after_2_to_4(matrix[...,int(dst/4)*4:int(dst/4)*4+4])
# swap back
matrix[...,[src,dst]] = matrix[...,[dst,src]]
return src_sum + dst_sum, (src_sum + dst_sum) - (src_base + dst_base)
## efficacy: (magnitude recovered relative to naive 2:4) / (magnitude lost by naive 2:4 relative to the optimal)
def efficacy(optimal_lost_magnitude, base_lost_magnitude, cur_lost_magnitude):
if base_lost_magnitude == optimal_lost_magnitude:
eff = 1.0
else:
eff = (base_lost_magnitude - cur_lost_magnitude) / (base_lost_magnitude - optimal_lost_magnitude)
return eff
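## Worked example (hypothetical numbers): if naive 2:4 loses 10.0 of magnitude, the optimal permutation
## would lose only 6.0, and the current permutation loses 7.0, then
## efficacy = (10.0 - 7.0) / (10.0 - 6.0) = 0.75, i.e. 75% of the recoverable magnitude has been recovered.
def _efficacy_example():
    return efficacy(optimal_lost_magnitude=6.0, base_lost_magnitude=10.0, cur_lost_magnitude=7.0)  # 0.75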
## find the magnitude if the rows of a matrix were pruned independently, without structure
def magnitude_after_pruning_rows(matrix, rate=0.5):
magnitude = 0.
cols = matrix.shape[1]
for r in range(matrix.shape[0]):
rowVals = matrix[r]
rowVals = np.sort(np.abs(rowVals))
magnitude += np.sum(rowVals[int(cols*rate):])
return magnitude
##############################################################################################
# permutation utilities
##############################################################################################
## exhaustively search an entire matrix on the GPU
def try_permutations_on_matrix(matrix, permutations):
use_cuda = use_gpu()
assert(use_cuda) # caller should have checked
matrix = np.copy(matrix)
matrix = matrix.astype(np.float32)
matrix_view = np.copy(matrix).flatten()
permutations_view = np.copy(np.asarray(permutations)).astype(np.uint32).flatten()
stripe_groups = np.asarray([[s for s in range(int(matrix.shape[1]/4))]]).astype(np.uint32)
stripe_groups_view = stripe_groups.flatten()
improvement = np.zeros((1), dtype=np.float32).flatten()
permutation = np.zeros((1), dtype=np.uint32).flatten()
result = permutation_search_cuda_kernels.check_permutations(matrix_view,
matrix.shape[0],
matrix.shape[1],
stripe_groups_view,
len(stripe_groups[0]),
len(stripe_groups),
permutations_view,
len(permutations),
improvement,
permutation)
return improvement[0], permutations[permutation[0]]
## find the permutation needed to make matrix A look like matrix B
def find_permutation(A, B):
permutation = []
for col in range(A.shape[1]):
Avals = A[...,col]
for bcol in range(B.shape[1]):
if np.all(Avals - B[...,bcol] == np.zeros(Avals.shape)):
permutation.append(bcol)
break
return permutation
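## Example (illustrative): if B is A with its first two columns swapped, the recovered permutation
## gives, for each column of A, that column's position in B.
def _find_permutation_example():
    A = np.arange(12, dtype=np.float32).reshape(3, 4)
    B = A[:, [1, 0, 2, 3]]
    return find_permutation(A, B)  # [1, 0, 2, 3]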
########################################
# reasonable method to find distance between permutations
# this is used to generate permutations "between" two other permutations to divide efficacy space
#######################################
## separate a flat permutation array into its groups, sort each group and the overall order to
## put the output into a canonical order: if two permutations have the same groups, they should appear identical
def make_grouped(A):
groups = []
for x in range(0,len(A),4):
group = []
for c in range(4):
group.append(A[x+c])
group = np.sort(group)
groups.append(group)
return groups
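## Example of the grouping (values are illustrative): each consecutive group of four entries is
## sorted, so make_grouped([3, 1, 2, 0, 7, 5, 6, 4]) yields groups [0, 1, 2, 3] and [4, 5, 6, 7].
def _make_grouped_example():
    return make_grouped([3, 1, 2, 0, 7, 5, 6, 4])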
## given two permutations, find the groups they have in common
def common_groups(A, B):
Ag = make_grouped(A)
Bg = make_grouped(B)
# convert to sets to take the intersection
As = set(tuple(Ag[g]) for g in range(len(Ag)))
Bs = set(tuple(Bg[g]) for g in range(len(Bg)))
common = As.intersection(Bs)
# flatten
C = []
for s in common:
for v in s:
C.append(v)
# group
return make_grouped(C)
## given two permutations, remove the groups that are common between them
def remove_common_groups(A, B):
Ag = make_grouped(A)
Bg = make_grouped(B)
# convert to sets to take set difference
As = set(tuple(Ag[g]) for g in range(len(Ag)))
Bs = set(tuple(Bg[g]) for g in range(len(Bg)))
Ad = As - Bs
Bd = Bs - As
# turn the differences back into flat arrays
A = []
for s in Ad:
for v in s:
A.append(v)
B = []
for s in Bd:
for v in s:
B.append(v)
# group to put into canonical order, re-flatten
A = make_grouped(A)
B = make_grouped(B)
A = [item for sublist in A for item in sublist]
B = [item for sublist in B for item in sublist]
return A,B
## given two permutations, find which elements in B need to go where to look like A
def group_differences(A, B):
Ag = make_grouped(A)
Bg = make_grouped(B)
wrong_entries = []
#for g,group in enumerate(Bg):
for g in range(len(Bg)):
group = Bg[g]
for i in range(len(group)):
val = group[i]
if val not in Ag[g]:
group_in_a = int(np.where(A == val)[0][0] / 4)
wrong_entries.append((val, g, group_in_a))
return wrong_entries
## (val, cur_group, desired_group) ==> dict[(cur_group, desired_group)] = [vals]
def dictify(wrong_entries):
result = {}
for entry in wrong_entries:
key = (entry[1], entry[2])
if key in result:
result[key].append(entry[0])
else:
result[key] = [entry[0]]
return result
## move groups of B to where they best match A's groups
def move_groups_to_match(B, A, debug=False):
Ag = make_grouped(A)
Bg = make_grouped(B)
new_Bg = [[] for g in range(len(Ag))]
wrong_entry_dict = dictify(group_differences(A, B))
if debug:
print(f"MGTM:\n\tAg: {Ag}\n\tBg: {Bg}\n\tWED: {wrong_entry_dict}")
moved_groups = []
keys_to_del = []
# move triples to the right spot
for k in wrong_entry_dict.keys():
if k[0] in moved_groups:
keys_to_del.append(k)
continue
if len(wrong_entry_dict[k]) == 3:
new_Bg[k[1]] = Bg[k[0]]
moved_groups.append(k[0])
keys_to_del.append(k)
if debug:
print(f"MGTM: moved triple {wrong_entry_dict[k]} from group {k[0]} to group {k[1]}")
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
# move doubles
for k in wrong_entry_dict.keys():
# if we've already moved the group to which this key belongs, remove it
if k[0] in moved_groups:
keys_to_del.append(k)
continue
if len(wrong_entry_dict[k]) == 2:
if len(new_Bg[k[1]]) == 0: # move it to its requested destination if possible
new_Bg[k[1]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: moved double {wrong_entry_dict[k]} from group {k[0]} to its preferred group {k[1]}")
elif len(new_Bg[k[0]]) == 0: # otherwise leave it where it is (if possible)
new_Bg[k[0]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: left double {wrong_entry_dict[k]} where it was in group {k[0]}")
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
# move singles
# try to leave things where they are to prevent oscillating
for k in wrong_entry_dict.keys():
if k[0] in moved_groups:
keys_to_del.append(k)
continue
if len(new_Bg[k[1]]) == 0: # requested destination
new_Bg[k[1]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: moved single {wrong_entry_dict[k]} from group {k[0]} to its preferred group {k[1]}")
elif len(new_Bg[k[0]]) == 0:
new_Bg[k[0]] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: left group {wrong_entry_dict[k]} where it was in group {k[0]}")
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
# put what's left where it'll fit
for k in wrong_entry_dict.keys():
if k[0] in moved_groups:
keys_to_del.append(k)
continue
for dst in range(len(new_Bg)):
if len(new_Bg[dst]) == 0:
new_Bg[dst] = Bg[k[0]]
keys_to_del.append(k)
assert(k[0] not in moved_groups)
moved_groups.append(k[0])
if debug:
print(f"MGTM: put group {wrong_entry_dict[k]} where it found a spot in group {dst}")
break
for k in keys_to_del:
del wrong_entry_dict[k]
keys_to_del = []
assert(len(wrong_entry_dict) == 0)
Agsize = sum( [ len(group) for group in Ag] )
Bgsize = sum( [ len(group) for group in new_Bg] )
assert(Agsize == Bgsize)
new_B = [item for sublist in new_Bg for item in sublist]
return new_B
## swap two permutation entries and put the permutation into unique order
def swap_and_correct(permutation, src, tgt):
permutation[src],permutation[tgt] = permutation[tgt],permutation[src]
grouped = make_grouped(permutation)
grouped = [item for sublist in grouped for item in sublist]
return grouped
## make a swap that will move B in the direction of A
num_diffs = 0
def move_permutation_towards(B, A, debug=False):
global num_diffs
B = move_groups_to_match(B, A, debug)
wrong_entries = group_differences(A, B)
num_diffs = len(wrong_entries)
# nothing to do, early out
if len(wrong_entries) == 0:
if debug:
print("MPT: early out")
return B
if debug:
print(f"MPT: checking {len(wrong_entries)} diffs: {wrong_entries}")
# look for a group of three wrong entries that want to do the same thing
entry_dict = dictify(wrong_entries)
for k in entry_dict.keys():
entry = entry_dict[k]
if len(entry) == 3:
if debug:
print(f"MPT: found a triple swap at {k}: {entry_dict[k]}")
(src, dst) = k
# find the index of the one needed to complete the group
# the value is the value in A[dst] that's not in B[src]
# it's already in the destination group and may or may not need to move
group_id = dst
Ag = make_grouped(np.copy(A))
Bg = make_grouped(np.copy(B))
value = -1
for c in range(4):
if Ag[dst][c] not in Bg[src]:
value = Ag[dst][c]
if debug:
print(f"\tMPT: found the missing value {value} in A group {dst} offset {c}")
break
assert(value != -1)
# now find that value in B
idx0 = np.where(B == value)[0][0]
# find the index of the one this group doesn't need
# it's a member of the group but not in the dict entry
group_id = src
for c in range(4):
if B[group_id*4+c] not in entry_dict[k]:
if debug:
print(f"\tMPT: swapping {idx0} and {group_id*4+c}")
return swap_and_correct(B, idx0, group_id*4+c)
# look for a group of two entries that are heading to the same place as another wrong entry
victim_loner_pair = None
for k in entry_dict.keys():
entry = entry_dict[k]
if len(entry) == 2:
if debug:
print(f"MPT: found a double swap at {k}: {entry_dict[k]}")
(src, dst) = k
# find a wrong entry whose dst is the same
for k2 in entry_dict.keys():
if k2 == k:
continue
# k2 is a key whose value also belongs in stripe k2[1] (dst2)
if dst == k2[1]:
if debug:
print(f"\tMPT: found a loner going in the same direction at {k2}: {entry_dict[k2][0]}")
# instead of moving these three to where they're headed, start merging them by moving the loner into the double
# look for a complement: something moving from src to src2
(src2, dst2) = k2
complement_key = (src, src2)
if complement_key in entry_dict:
complement = entry_dict[complement_key][0]
if debug:
print(f"\t\tMPT: found a complement to the loner:{complement}")
return swap_and_correct(B, np.where(B == entry_dict[k2][0])[0][0], np.where(B == complement)[0][0])
# didn't find a complement, choose one of the two in the src group that don't belong
elif victim_loner_pair is None:
for k3 in entry_dict.keys():
if k3 == k:
continue
if k3[0] == src: # found the victim
victim = entry_dict[k3][0]
if debug:
print(f"\t\tMPT: found a victim for the double swap:{k3} -> {victim}")
victim_loner_pair = (victim, entry_dict[k2][0])
#return swap_and_correct(B, np.where(B == entry_dict[k2][0])[0][0], np.where(B == victim)[0][0])
if victim_loner_pair is not None:
if debug:
print(f"\t\tMPT: couldn't find any complements for double swaps, so going with a loner to make a triple: {victim_loner_pair}")
return swap_and_correct(B, np.where(B == victim_loner_pair[0])[0][0], np.where(B == victim_loner_pair[1])[0][0])
# look for one swap that will correct two entries
candidate_second = None
for we in range(len(wrong_entries)):
cur_entry = wrong_entries[we]
#if debug:
# print(f"\tMPT: checking {cur_entry} for complement")
for we2 in range(0,len(wrong_entries)):
pos_swap = wrong_entries[we2]
#if debug:
# print(f"\t\tMPT: is {pos_swap}?")
if cur_entry[1] == pos_swap[2] and cur_entry[2] == pos_swap[1]:
if debug:
print(f"\t\tfound complements: swapping {cur_entry} and {pos_swap}")
return swap_and_correct(B, np.where(B == cur_entry[0])[0][0], np.where(B == pos_swap[0])[0][0])
elif wrong_entries[0][2] == pos_swap[1]: # if pos_swap is currently where we[0] wants to go, keep it in mind
candidate_second = pos_swap
# fall back on picking the first one we come across
assert(candidate_second is not None)
if debug:
print(f"No complement, swapping two entries: {wrong_entries[0]} {candidate_second}")
return swap_and_correct(B, np.where(B == wrong_entries[0][0])[0][0], np.where(B == candidate_second[0])[0][0])
## find a shortest path from permutation A to B
def permutation_distance(A, B, matrix=None, magnitude_targets=None, debug=False, verbosity=0):
global num_diffs
swaps = 0
debug = False
swap_limit = int(math.pow(2,int(len(A)/4)-1))
num_diffs = swap_limit
common = []
target_results = None
if magnitude_targets is not None:
assert matrix is not None
cur_mag = sum_after_2_to_4(matrix[:,A])
target_results = [(cur_mag, A) for i in range(len(magnitude_targets))]
if verbosity > 0 and matrix is not None:
print(f"swap {'0':>4} {sum_after_2_to_4(matrix[:, B]):>15.3f}")
if verbosity > 5:
print(f"swap {0:>4}, {make_grouped(A)} {make_grouped(B)}")
while not np.all(np.array(A)-np.array(B) == np.zeros(np.array(A).shape)):
cGroups = common_groups(A, B)
for g in cGroups:
common.append(g)
A, B = remove_common_groups(A, B)
if len(A) == 0:
break
B = move_permutation_towards(np.array(B), np.array(A), debug=debug)
swaps += 1
if matrix is not None:
total_cur_permute = [c for c in B]
for c in [item for sublist in common for item in sublist]:
total_cur_permute.append(c)
if verbosity > 0 or magnitude_targets is not None:
cur_mag = sum_after_2_to_4(matrix[:,total_cur_permute])
for i in range(len(target_results)):
result = target_results[i]
if abs(magnitude_targets[i] - result[0]) > abs(magnitude_targets[i] - cur_mag):
target_results[i] = (cur_mag, total_cur_permute)
if verbosity > 0:
print(f"swap {swaps:>4} {cur_mag:>15.3f}")
if verbosity > 5 or swaps > swap_limit:
print(f"swap {swaps:>4}, {A} {B}, {num_diffs} diffs remain")
# safety net
if swaps > swap_limit+3:
return swaps, target_results
return swaps, target_results
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/permutation_search_kernels/permutation_utilities.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(args):
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
# only prune linear layers, even though we also support conv1d, conv2d and conv3d
ASP.init_model_for_pruning(model, "m4n2_1d", whitelist=[torch.nn.Linear], allow_recompute_mask=True)
ASP.init_optimizer_for_pruning(optimizer)
step = 0
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps)
# simulate sparsity by inserting zeros into existing dense weights
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps)
# recompute sparse masks
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps_2)
# turn off sparsity
print("SPARSE :: ",one_ll)
ASP.restore_pruned_weights()
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps_2)
if __name__ == '__main__':
class Args:
batch_size = 32
input_features = 16
output_features = 8
hidden_features = 40
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
num_dense_steps_2 = 1500
args = Args()
main(args)
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/test/toy_problem.py |
import torch
import torch.onnx
from apex.contrib.sparsity.permutation_lib import Permutation
"""
Functional and behavioral correctness checking for network permutations
Each test class is a torch.nn.Module with three required members:
- self.input_shape is used to populate a dummy input
- self.expected_C_params indicates how many parameters are expected to be permuted in the C dimension
- self.expected_K_params indicates how many parameters are expected to be permuted in the K dimension
A test is successful if and only if:
1. The output of the un-permuted module matches (within a tolerance) the ouput of the permuted module
2. The number of parameters permuted in C, as reported by the Permutation class, matches the expected value in the test module
3. The number of parameters permuted in K, as reported by the Permutation class, matches the expected value in the test module
This file has all the test modules defined first, followed by the common test routine to check each module's correctness, and finally the main/entry point.
"""
class simple_convs(torch.nn.Module):
"""Stack of 2d convolutions with different normalization and activation functions"""
def __init__(
self,
num_convs: int,
channels: int,
normalization: str = 'none',
activation: str = 'ReLU',
):
super().__init__()
self.num_convs = num_convs
self.channels = channels
self.normalization = normalization
self.activation = activation
self.input_shape = [4, channels, 7, 7]
# we'll permute all convs' weights along C except the first
self.expected_C_params = -1
self.expected_K_params = 0
self.conv_stack = torch.nn.Sequential()
for c in range(self.num_convs-1):
self.conv_stack.add_module(f"conv_{c}", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
self.expected_C_params += 1
self.expected_K_params += 2
if self.normalization == 'BatchNorm2d':
self.conv_stack.add_module(f"norm_{c}", torch.nn.BatchNorm2d(self.channels, track_running_stats=False))
self.expected_K_params += 2
elif self.normalization == 'LazyBatchNorm2d':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LazyBatchNorm2d(track_running_stats=False))
self.expected_K_params += 2
elif self.normalization == 'GroupNorm':
self.conv_stack.add_module(f"norm_{c}", torch.nn.GroupNorm(4, self.channels, affine=True))
self.expected_C_params -= 1 # GN prevents permutations of the neighboring convs
self.expected_K_params -= 2
elif self.normalization == 'InstanceNorm2d':
self.conv_stack.add_module(f"norm_{c}", torch.nn.InstanceNorm2d(self.channels, affine=True, track_running_stats=False))
self.expected_K_params += 2
elif self.normalization == 'LocalResponseNorm':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LocalResponseNorm(16))
elif self.normalization == 'LayerNorm1':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LayerNorm(7))
elif self.normalization == 'LayerNorm2':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LayerNorm([7, 7]))
elif self.normalization == 'LayerNorm3':
self.conv_stack.add_module(f"norm_{c}", torch.nn.LayerNorm([self.channels, 7, 7]))
self.expected_K_params += 2
elif self.normalization == 'SyncBatchNorm':
self.conv_stack.add_module(f"norm_{c}", torch.nn.SyncBatchNorm(self.channels, track_running_stats=False))
self.expected_K_params += 2
self.conv_stack.add_module(f"act_{c}", torch.nn.ReLU())
self.conv_stack.add_module("conv_out", torch.nn.Conv2d(self.channels, 8, kernel_size=(1,1)))
self.expected_C_params += 1
def forward(self, x: torch.Tensor):
x = self.conv_stack(x)
return x
class conv_1d(torch.nn.Module):
"""1D convolutions in isolation and with siblings"""
def __init__(
self,
with_2d = False,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.with_2d = with_2d
self.input_conv = torch.nn.Conv2d(self.input_shape[1], 32, kernel_size=(3,3), padding=1)
self.expected_K_params += 2
self.branch_a_1D = torch.nn.Conv1d(32, 32, kernel_size=3, padding=1)
self.expected_C_params += 1
self.expected_K_params += 2
if self.with_2d:
self.branch_b_2D = torch.nn.Conv2d(32, 32, kernel_size=(3,3), padding=1)
self.expected_C_params += 1
self.expected_K_params += 2
self.out_conv = torch.nn.Conv2d(32, 8, kernel_size=(1,1))
self.expected_C_params += 1
def forward(self, x: torch.Tensor):
step0 = self.input_conv(x)
s0shape = step0.shape
step1 = self.branch_a_1D(step0.view(s0shape[0], s0shape[1], s0shape[2]*s0shape[3])).view(s0shape)
if self.with_2d:
step1 = step1 + self.branch_b_2D(step0)
return self.out_conv(step1)
class grouped_convs(torch.nn.Module):
"""Stack of 2d convolutions with different types of grouped convolutions"""
def __init__(
self,
):
super().__init__()
self.channels = 128
self.input_shape = [4, self.channels, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.conv_stack = torch.nn.Sequential()
self.conv_stack.add_module("conv_in", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
# the depthwise conv lets both the previous layer's and this layer's weights and biases permute along K
self.expected_K_params += 4
self.conv_stack.add_module("conv_dw", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1, groups=self.channels))
# regular conv permutes both
self.expected_C_params += 1
self.expected_K_params += 2
self.conv_stack.add_module("conv_0", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1, groups=1)) # explicit '1' groups for extra coverage
# with only 2 groups, permutations are allowed only along C
self.expected_C_params += 1
self.conv_stack.add_module("conv_gr2", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1, groups=2))
# another regular conv, this one can't do anything
self.conv_stack.add_module("conv_1", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
# finally, grouped conv with small groups
self.conv_stack.add_module("conv_gr64", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1, groups=self.channels//2))
def forward(self, input: torch.Tensor):
return self.conv_stack(input)
class simple_forks_joins(torch.nn.Module):
"""Some simple residual connections to test collecting parameters into a single group. Four sections: input, blocka + residual, blockb + blockc, output"""
def __init__(
self,
):
super().__init__()
self.channels = 64
self.input_shape = [4, self.channels, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_convs = torch.nn.Sequential()
# input conv can only permute along K
self.expected_K_params += 2
self.input_convs.add_module("conv_in0", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
# the next conv can permute along both C and K
self.expected_C_params += 1
self.expected_K_params += 2
self.input_convs.add_module("conv_in1", torch.nn.Conv2d(self.channels, self.channels, kernel_size=(3,3), padding=1))
# BN will permute 2 more along K
self.expected_K_params += 2
self.input_convs.add_module("bn_in1", torch.nn.BatchNorm2d(self.channels, track_running_stats=False))
self.block_a = torch.nn.Sequential()
# cut channels in half, then back to full, two fully permutable convs
self.expected_C_params += 2
self.expected_K_params += 4
self.block_a.add_module("conv_a0", torch.nn.Conv2d(self.channels, self.channels // 2, kernel_size=(3,3), padding=1))
self.block_a.add_module("conv_a1", torch.nn.Conv2d(self.channels // 2, self.channels, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
# cut channels in half, then back to full, two fully permutable convs
self.expected_C_params += 2
self.expected_K_params += 4
self.block_b.add_module("conv_b0", torch.nn.Conv2d(self.channels, self.channels // 2, kernel_size=(3,3), padding=1))
self.block_b.add_module("conv_b1", torch.nn.Conv2d(self.channels // 2, self.channels, kernel_size=(3,3), padding=1))
self.block_c = torch.nn.Sequential()
# cut channels in half, then back to full, two fully permutable convs
self.expected_C_params += 2
self.expected_K_params += 4
self.block_c.add_module("conv_c0", torch.nn.Conv2d(self.channels, self.channels // 2, kernel_size=(3,3), padding=1))
self.block_c.add_module("conv_c1", torch.nn.Conv2d(self.channels // 2, self.channels, kernel_size=(3,3), padding=1))
self.output_conv = torch.nn.Sequential()
self.expected_C_params += 1
self.output_conv.add_module("conv_out", torch.nn.Conv2d(self.channels, 8, kernel_size=(3,3), padding=1))
def forward(self, input: torch.Tensor):
step0 = self.input_convs(input)
step1 = step0 + self.block_a(step0)
step2 = self.block_b(step1) + self.block_c(step1)
return self.output_conv(step2)
class different_grouped_convs(torch.nn.Module):
"""Convolutions with different group sizes need to use the GCD of the input channel counts if siblings"""
def __init__(
self,
):
super().__init__()
self.channels = 16
self.input_shape = [4, self.channels, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.input_conv.add_module("input_conv", torch.nn.Conv2d(self.channels, 128, kernel_size=(3,3), padding=1))
self.expected_C_params += 4
# 4 parallel blocks with decreasing group size from "left" to "right"
self.block_a = torch.nn.Sequential()
self.block_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
self.block_b.add_module("conv_b", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=2))
self.block_c = torch.nn.Sequential()
self.block_c.add_module("conv_c", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=4))
self.block_d = torch.nn.Sequential()
self.block_d.add_module("conv_d", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=8))
# output can't permute along C, disallowed by parents
self.output_conv = torch.nn.Sequential()
self.output_conv.add_module("output_conv", torch.nn.Conv2d(128, 8, kernel_size=(3,3), padding=1))
def forward(self, input: torch.Tensor):
step0 = self.input_conv(input)
step1 = self.block_a(step0) + self.block_b(step0) + self.block_c(step0) + self.block_d(step0)
return self.output_conv(step1)
class siblings_poison(torch.nn.Module):
"""A single sibling that cannot permute along C poisons all other siblings in its group"""
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_conv = torch.nn.Sequential()
self.input_conv.add_module("input_conv", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
# two parallel blocks: conv->flatten->linear | flatten->linear
self.expected_K_params += 4 # two linears will have their output channels permuted for the output layer
self.block_a = torch.nn.Sequential()
self.block_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_a.add_module("flatten_a", torch.nn.Flatten(1))
self.block_a.add_module("linear_a", torch.nn.Linear(6272, 128))
self.block_b = torch.nn.Sequential()
self.block_b.add_module("flatten_b", torch.nn.Flatten(1))
self.block_b.add_module("linear_b", torch.nn.Linear(6272, 128))
self.output = torch.nn.Sequential()
self.expected_C_params += 1 # output layer will have its C dimension permuted
self.output.add_module("output", torch.nn.Linear(128, 8))
def forward(self, input: torch.Tensor):
step0 = self.input_conv(input)
step1 = self.block_a(step0) + self.block_b(step0)
return self.output(step1)
class coparent_poison(torch.nn.Module):
"""A single coparent that cannot permute along K poisons all other coparents in its group"""
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.input_conv.add_module("input_conv", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
# two parallel blocks: conv | conv -> grouped conv
self.expected_C_params += 3 # all convs permute along C
self.expected_K_params += 2 # only conv_b0 permutes along K
self.block_a = torch.nn.Sequential()
self.block_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
self.block_b.add_module("conv_b0", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b.add_module("conv_b1", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=4))
self.output = torch.nn.Sequential()
self.output.add_module("output", torch.nn.Conv2d(128, 8, kernel_size=(1,1)))
def forward(self, input: torch.Tensor):
step0 = self.input_conv(input)
step1 = self.block_a(step0) + self.block_b(step0)
return self.output(step1)
class depthwise_child_is_sibling(torch.nn.Module):
"""The child of a depthwise convolution should act as a sibling"""
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.input_conv.add_module("input_conv", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
# two parallel blocks: conv | depthwise->conv
self.expected_C_params += 2
self.expected_K_params += 4 + 2
self.block_a = torch.nn.Sequential()
self.block_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
self.block_b.add_module("conv_b_dw", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1, groups=128))
self.block_b.add_module("conv_b_1", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.output_conv = torch.nn.Sequential()
self.expected_C_params += 1
self.output_conv.add_module("output_conv", torch.nn.Conv2d(128, 8, kernel_size=(1,1)))
def forward(self, input: torch.Tensor):
step0 = self.input_conv(input)
step1 = self.block_a(step0) + self.block_b(step0)
return self.output_conv(step1)
class module_attribute(torch.nn.Module):
"""Attributes of some module must be permuted if they feed some operation that is permuted"""
def __init__(
self,
complexity: int = 0,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.complexity = complexity
self.input_conv = torch.nn.Sequential()
self.expected_K_params += 3 # conv weight, conv bias, input_offset C (counts as K since it's acting as a parent)
self.input_offset = torch.nn.Parameter(torch.zeros(128,7,7))
torch.nn.init.normal_(self.input_offset.data, mean=0.0, std=2.0)
self.input_conv.add_module("conv_input", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
# add a couple more layers, and let the same offset affect another layer, as well
if complexity == 1:
self.expected_C_params += 2
self.expected_K_params += 4
self.stack_a = torch.nn.Sequential()
self.stack_a.add_module("conv_a", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.stack_b = torch.nn.Sequential()
self.stack_b.add_module("conv_b", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.output_conv = torch.nn.Sequential()
self.expected_C_params += 1
self.output_conv.add_module("conv_output", torch.nn.Conv2d(128, 8, kernel_size=(3,3)))
def forward(self, input: torch.Tensor):
batch_input_offset = self.input_offset.expand(input.shape[0], -1, -1, -1)
x = self.input_conv(input) + batch_input_offset
if self.complexity == 1:
x = self.stack_a(x) + batch_input_offset
x = self.stack_b(x) + batch_input_offset
return self.output_conv(x)
class square_attribute(torch.nn.Module):
"""Attributes with multiple dimensions matching the permutation length should only be permuted along the correct dimension"""
# TODO: currently, such an attribute will disallow permutations around it, but with effort, it could be handled correctly.
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 16]
self.expected_C_params = 0
self.expected_K_params = 0
self.input_linear = torch.nn.Sequential()
#self.expected_K_params += 2 # if handled correctly, the linear's K and the offset's K should both be permuted
self.input_linear.add_module("linear_input", torch.nn.Linear(self.input_shape[1], 16))
self.input_offset = torch.nn.Parameter(torch.zeros(16, 16))
torch.nn.init.normal_(self.input_offset.data, mean=0.0, std=2.0)
self.output_linear = torch.nn.Sequential()
#self.expected_C_params += 1 # if handled correctly, this should be permuted
self.output_linear.add_module("linear_output", torch.nn.Linear(16, 8))
def forward(self, input: torch.Tensor):
batch_input_offset = self.input_offset.expand(input.shape[0], -1, -1)
x = self.input_linear(input) + torch.permute(batch_input_offset, (0, 2, 1))
return self.output_linear(x)
class MHA_test(torch.nn.Module):
"""MultiheadAttention modules are unique, we need to check permutations for input and ouput projections"""
def __init__(
self,
hidden_dim: int = 256,
seq_len: int = 64,
num_heads: int = 16
):
super().__init__()
self.hidden_dim = hidden_dim
self.seq_len = seq_len
self.num_heads = num_heads
self.input_shape = [4, self.seq_len, self.hidden_dim]
self.expected_C_params = 1
self.expected_K_params = 2
self.MHA0 = torch.nn.MultiheadAttention(self.hidden_dim, self.num_heads, dropout=False, batch_first=True)
self.MHA1 = torch.nn.MultiheadAttention(self.hidden_dim, self.num_heads, dropout=False, batch_first=True)
def forward(self, input: torch.Tensor):
step0,_ = self.MHA0(input, input, input)
step1,_ = self.MHA1(step0, step0, step0)
return step1
class one_sparse_sibling(torch.nn.Module):
"""If only one of two siblings is sparse, both need to be permuted"""
def __init__(
self,
):
super().__init__()
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.in_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.in_conv.add_module("conv_in", torch.nn.Conv2d(self.input_shape[1], 128, kernel_size=(3,3), padding=1))
self.block_a = torch.nn.Sequential()
self.expected_C_params += 1 # only conv_a0 will be permuted along C
self.expected_K_params += 2 # only conv_a1 will be permuted along K
self.block_a.add_module("conv_a0", torch.nn.Conv2d(128, 3, kernel_size=(1,1)))
self.block_a.add_module("conv_a1", torch.nn.Conv2d(3, 128, kernel_size=(3,3), padding=1))
self.block_b = torch.nn.Sequential()
self.expected_C_params += 2 # even though conv_a0 will not be sparse (only 3 output channels), conv_b0 can still be permuted along C
self.expected_K_params += 4
self.block_b.add_module("conv_b0", torch.nn.Conv2d(128, 128, kernel_size=(3,3), padding=1))
self.block_b.add_module("conv_b1", torch.nn.Conv2d(128, 128, kernel_size=(1,1)))
self.out_conv = torch.nn.Sequential()
self.expected_C_params += 1
self.out_conv.add_module("conv_out", torch.nn.Conv2d(128, 8, kernel_size=(1,1)))
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
step1 = self.block_a(step0) + self.block_b(step0)
return self.out_conv(step1)
class test_concat(torch.nn.Module):
"""If concats are along the channel dimension (dim1 of NCHW), downstream layers can still be permuted despite C!=parentK"""
def __init__(
self,
ratio = 1, # ratio between # channels in either path to be concatenated
dim = 1, # dimension to concatenate, K by default
depth = 1, # number of concats to stack
):
super().__init__()
assert dim == 1 or ratio == 1, "can't concat along dimensions other than K if K's don't match"
self.dim = dim
self.depth = depth
self.input_shape = [4, 16, 7, 7]
self.expected_C_params = 0
self.expected_K_params = 0
self.in_conv = torch.nn.Sequential()
self.expected_K_params += 2
self.in_conv.add_module("conv_in", torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1)))
self.left_paths = torch.nn.ModuleList([torch.nn.Conv2d(64, 64, kernel_size=(1,1))])
self.expected_C_params += 1
self.expected_K_params += 2
in_C = 64
out_C = 64
for d in range(1,depth,1):
self.expected_C_params += 1
self.expected_K_params += 2
if dim == 1:
out_C += 64
self.left_paths.append(torch.nn.Conv2d(in_C+64, out_C, kernel_size=(1,1)))
if dim == 1:
in_C += 64
self.right_path = torch.nn.Sequential()
self.expected_C_params += 1
self.expected_K_params += 2
self.right_path.add_module("conv_b", torch.nn.Conv2d(64, 64*ratio, kernel_size=(1,1)))
self.out_conv = torch.nn.Sequential()
self.expected_C_params += 1
if dim == 1:
out_C += 64*ratio
self.out_conv.add_module("conv_out", torch.nn.Conv2d(out_C, 16, kernel_size=(1,1)))
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
step1 = step0
for d, layer in enumerate(self.left_paths):
if d == 0:
step1 = layer(step1)
else:
step1 = layer(torch.cat([step1, step0], 1))
step2 = torch.cat([step1, self.right_path(step0)], self.dim)
return self.out_conv(step2)
class test_flatten_op(torch.nn.Module):
"""flatten ops may change the effective channel count, typically by collapsing N,C,H,W into N,C*H*W before a classifier"""
def __init__(
self,
change_dims = True,
):
super().__init__()
self.change_dims = change_dims
self.input_shape = [4, 16, 3, 3]
self.expected_C_params = 0
self.expected_K_params = 0
if not self.change_dims:
self.input_shape = [4, 16, 1, 1]
self.expected_C_params = 1
self.expected_K_params = 2
self.flattened_C = self.input_shape[2] * self.input_shape[3] * 64
self.in_conv = torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1))
self.out_gemm = torch.nn.Linear(self.flattened_C, 16)
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
step1 = torch.flatten(step0, start_dim=1)
return self.out_gemm(step1)
class test_flatten_module(torch.nn.Module):
"""flatten modules may change the effective channel count, typically by collapsing N,C,H,W into N,C*H*W before a classifier"""
def __init__(
self,
change_dims = True,
):
super().__init__()
self.change_dims = change_dims
self.input_shape = [4, 16, 3, 3]
self.expected_C_params = 0
self.expected_K_params = 0
if not self.change_dims:
self.input_shape = [4, 16, 1, 1]
self.expected_C_params = 1
self.expected_K_params = 2
self.flattened_C = self.input_shape[2] * self.input_shape[3] * 64
self.stack = torch.nn.Sequential()
self.stack.add_module("conv_in", torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1)))
self.stack.add_module("flatten", torch.nn.Flatten(1))
self.stack.add_module("gemm_out", torch.nn.Linear(self.flattened_C, 16))
def forward(self, input: torch.Tensor):
return self.stack(input)
class test_trace_failure(torch.nn.Module):
"""make sure tracing failures are handled gracefully"""
def __init__(
self
):
super().__init__()
self.input_shape = [4, 16, 1, 1]
self.expected_C_params = 0
self.expected_K_params = 0
self.in_conv = torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1))
self.out_conv = torch.nn.Conv2d(64, 16, kernel_size=(1,1))
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
#NCHW = 4,64,1,1
channels = step0.size(1)
channel_offset = torch.arange(channels, dtype=torch.long, device=step0.device)
channel_offset = channel_offset.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(step0)
step0.add_(channel_offset)
return self.out_conv(step0)
class already_sparse(torch.nn.Module):
"""if weights are already sparse, permutations should be skipped"""
def __init__(
self
):
super().__init__()
self.input_shape = [4, 16, 3, 3]
self.expected_C_params = 0
self.expected_K_params = 0
self.in_conv = torch.nn.Conv2d(self.input_shape[1], 64, kernel_size=(1,1))
self.out_conv = torch.nn.Conv2d(64, 16, kernel_size=(1,1))
# apply 2:4 to the output weights, it will not require a permutation
out_weights = torch.ones_like(self.out_conv.weight)
out_weights[:,0::2,...] = 0
assert torch.sum(out_weights) == torch.numel(out_weights)/2
self.out_conv.weight.data.copy_(out_weights)
def forward(self, input: torch.Tensor):
step0 = self.in_conv(input)
return self.out_conv(step0)
def test_model(model, tag, verbosity=0, save_onnx=False):
Permutation.set_identical_seed()
x = torch.rand(model.input_shape)
if save_onnx:
torch.onnx.export(model, x, f"{tag}.onnx", verbose=False)
base_out = model(x)
sparse_parameters = []
all_parameters = []
module_to_params = {}
module_to_params[torch.nn.MultiheadAttention] = ('q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight')
for module_name, module in model.named_modules():
module_type_str = str(type(module)).split("\'")[1]
if module_type_str == 'torch.nn.modules.container.Sequential' or module_type_str.startswith('torchvision.models'):
# filter out the 'torch.nn.modules.container.Sequential' type and the whole model, like 'torchvision.models.vgg.VGG'
continue
for p_name, p in module.named_parameters():
all_parameters.append((module_name, module, p_name, p))
if isinstance(module, (torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.MultiheadAttention, torch.nn.modules.linear.NonDynamicallyQuantizableLinear)):
allowed_names = ('weight',)
if type(module) in module_to_params.keys():
allowed_names = module_to_params[type(module)]
if p_name not in allowed_names:
continue
if len(p.size()) >= 2 and (p.size()[0] % 8) == 0 and (p.size()[1] % 16) == 0:
mask = torch.ones_like(p).bool()
buffname = p_name.split(".")[-1]
module.register_buffer('__%s_mma_mask' % buffname, mask)
sparse_parameters.append((module_name, module, p_name, p, mask, None))
if module_type_str == 'torch.nn.modules.batchnorm.BatchNorm2d':
# need to get the running_mean and running_var from model.state_dict(), as they are not the learnable parameters
module_mean_name = module_name + '.running_mean'
module_var_name = module_name + '.running_var'
for param_key in model.state_dict():
if module_mean_name == param_key or module_var_name == param_key:
all_parameters.append((module_name, module, param_key.split(".")[-1], model.state_dict()[param_key]))
if verbosity > 1:
sparse_param_names = [module_name+":"+p_name for (module_name, module, p_name, p, mask, pruned) in sparse_parameters]
all_param_names = [module_name+":"+p_name for (module_name, module, p_name, p) in all_parameters]
print(f"\tSparse parameter names: {sparse_param_names}\n\tAll parameter names: {all_param_names}")
Permutation.set_permutation_params_from_asp(model, sparse_parameters, all_parameters, verbosity)
Permutation.permute_model(model)
C_params, K_params, missed_dims = Permutation.get_permutation_stats()
success = True
fail_str = ""
succ_str = ""
if len(C_params) != model.expected_C_params:
success = False
fail_str = fail_str + f"\n\tC expected {model.expected_C_params}, got {len(C_params)} ({C_params})"
elif verbosity > 0:
succ_str = succ_str + f"\n\tC expected {model.expected_C_params}, got {len(C_params)} ({C_params})"
if len(K_params) != model.expected_K_params:
success = False
fail_str = fail_str + f"\n\tK expected {model.expected_K_params}, got {len(K_params)} ({K_params})"
elif verbosity > 0:
succ_str = succ_str + f"\n\tK expected {model.expected_K_params}, got {len(K_params)} ({K_params})"
if len(missed_dims) != 0:
success = False
fail_str = fail_str + f"\n\tMissed permutations along {len(missed_dims)} dimensions ({missed_dims})"
perm_out = model(x)
atol = 1e-5
rtol = 1e-4
outs_match = torch.allclose(base_out.data, perm_out.data, atol=atol, rtol=rtol)
if not outs_match:
fail_str = fail_str + f"\n\tOutputs matched: {outs_match}"
if success:
diffs = base_out - perm_out
diff_locs = (diffs >= atol).nonzero(as_tuple=True)
fail_str = fail_str + f"\n{diff_locs}\n{diffs[diff_locs]}"
success = False
if success:
print(f"{tag}: Success\t{succ_str}")
else:
print(f"{tag}: FAIL\t{fail_str}")
return success
def main():
global_success = True
global_success &= test_model(simple_convs(2,16), "smoke test")
global_success &= test_model(simple_convs(5, 64), "simple 5 64")
global_success &= test_model(simple_convs(10, 32), "simple 10 32")
# normalization
for norm in ['BatchNorm2d', 'LazyBatchNorm2d', 'InstanceNorm2d', 'LazyInstanceNorm2d', 'LayerNorm3', 'LocalResponseNorm']:
global_success &= test_model(simple_convs(4, 128, norm), norm)
# disallowed normalization
for norm in ['GroupNorm']:
global_success &= test_model(simple_convs(4, 128, norm), norm)
global_success &= test_model(conv_1d(), "conv1d")
global_success &= test_model(conv_1d(with_2d=True), "conv1d and conv2d")
global_success &= test_model(grouped_convs(), "grouped convs")
global_success &= test_model(simple_forks_joins(), "forks and joins")
global_success &= test_model(different_grouped_convs(), "GCD")
global_success &= test_model(siblings_poison(), "sibling poison")
global_success &= test_model(coparent_poison(), "coparent poison")
global_success &= test_model(depthwise_child_is_sibling(), "dw child is sibling")
global_success &= test_model(module_attribute(complexity=0), "single attribute")
global_success &= test_model(module_attribute(complexity=1), "single attribute thrice")
global_success &= test_model(MHA_test(hidden_dim=256, seq_len=64, num_heads=16), "stacked MHA")
global_success &= test_model(one_sparse_sibling(), "one sparse sibling")
global_success &= test_model(test_concat(), "simple concat") # concat along K
global_success &= test_model(test_concat(dim=0), "concat dim0") # concat along C
global_success &= test_model(test_concat(ratio=2), "concat ratio2") # concat along K with different K values
global_success &= test_model(test_concat(depth=2), "concat depth2") # concat along K multiple times
global_success &= test_model(test_concat(depth=3), "concat depth3")
global_success &= test_model(test_concat(ratio=3, depth=4), "concat ratio3 depth4")
global_success &= test_model(test_concat(dim=0, depth=3), "concat dim0 depth3")
global_success &= test_model(test_flatten_op(), "flatten op")
global_success &= test_model(test_flatten_op(change_dims=False), "useless flatten op")
global_success &= test_model(test_flatten_module(), "flatten module")
global_success &= test_model(test_flatten_module(change_dims=False), "useless flatten module")
global_success &= test_model(test_trace_failure(), "trace failure")
global_success &= test_model(already_sparse(), "skip already sparse")
global_success &= test_model(square_attribute(), "square attributes")
if global_success:
print("All tests completed successfully.")
else:
print("There was at least one failure.")
if __name__ == '__main__':
main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/test/test_permutation_application.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
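# This script is part 2 of the checkpointing test: it expects the "part1.chkp"
# checkpoint written by checkpointing_test_part1.py and resumes sparse training
# from it. checkpointing_test_reference.py runs the equivalent part1 + part2
# flow in a single process for comparison.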
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(step, args, model_state_dict, optimizer_state_dict):
#
# PART2
#
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
ASP.init_model_for_pruning(model, args.pattern, verbosity=args.verbosity, whitelist=args.whitelist, allow_recompute_mask=args.allow_recompute_mask)
ASP.init_optimizer_for_pruning(optimizer)
torch.manual_seed(args.seed2)
model.load_state_dict(model_state_dict)
optimizer.load_state_dict(optimizer_state_dict)
print("Model sparsity is %s" % ("enabled" if ASP.is_sparsity_enabled() else "disabled"))
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps_2)
if __name__ == '__main__':
checkpoint = torch.load("part1.chkp")
class Args:
verbosity = checkpoint['verbosity']
seed = 4873
seed2 = checkpoint['seed2']
pattern = checkpoint['pattern']
whitelist = checkpoint['whitelist']
allow_recompute_mask = checkpoint['allow_recompute_mask']
batch_size = 32
input_features = 8
output_features = 8
hidden_features = 32
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
checkpoint_path = "part1.chkp"
args = Args()
main(checkpoint['step'], args, checkpoint['model_state_dict'], checkpoint['optimizer_state_dict'])
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/test/checkpointing_test_part2.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(args):
#
# PART1
#
torch.manual_seed(args.seed)
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
ASP.init_model_for_pruning(model, args.pattern, verbosity=args.verbosity, whitelist=args.whitelist, allow_recompute_mask=args.allow_recompute_mask)
ASP.init_optimizer_for_pruning(optimizer)
step = 0
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps)
# simulate sparsity by inserting zeros into existing dense weights
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps)
torch.save({
'step': step,
'verbosity': args.verbosity,
'seed2': args.seed2,
'pattern': args.pattern,
'whitelist': args.whitelist,
'allow_recompute_mask': args.allow_recompute_mask,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, args.checkpoint_path)
if __name__ == '__main__':
class Args:
verbosity=3
seed = 4873
seed2 = 99875
pattern = "m4n2_2d_best"
whitelist = [torch.nn.Linear]
allow_recompute_mask = True
batch_size = 32
input_features = 8
output_features = 8
hidden_features = 32
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
checkpoint_path = "part1.chkp"
args = Args()
main(args)
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/test/checkpointing_test_part1.py |
from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
#
# Reference run for checkpointing test (part1 + part2)
#
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
elif i == args.num_layers-1:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.output_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.output_features])
else:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.hidden_features, args.hidden_features)
od['layer_norm_%d' % (i+1)] = torch.nn.LayerNorm([args.batch_size, args.hidden_features])
return torch.nn.Sequential(od)
def train_step(args, model, optimizer, input_batch, target_batch, step):
predicted_target = model(input_batch)
loss = ((predicted_target-target_batch)**2).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
step = step + 1
#print("Step %d :: loss=%e" % (step, loss.item()))
return step
def train_loop(args, model, optimizer, step, num_steps):
for i in range(num_steps):
input_batch = torch.randn([args.batch_size, args.input_features]).cuda()
target_batch = torch.randn([args.batch_size, args.output_features]).cuda()
step = train_step(args, model, optimizer, input_batch, target_batch, step)
return step
def main(args):
#
# PART1
#
torch.manual_seed(args.seed)
model = build_model(args).cuda()
one_ll = next(model.children()).weight
optimizer = FusedAdam(model.parameters())
ASP.init_model_for_pruning(model, args.pattern, whitelist=args.whitelist, allow_recompute_mask=args.allow_recompute_mask)
ASP.init_optimizer_for_pruning(optimizer)
step = 0
# train for a few steps with dense weights
print("DENSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_dense_steps)
# simulate sparsity by inserting zeros into existing dense weights
ASP.compute_sparse_masks()
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps)
#
# PART 2
#
torch.manual_seed(args.seed2)
# train for a few steps with sparse weights
print("SPARSE :: ",one_ll)
step = train_loop(args, model, optimizer, step, args.num_sparse_steps_2)
if __name__ == '__main__':
class Args:
seed = 4873
seed2 = 99875
pattern = "m4n2_2d_best"
whitelist = [torch.nn.Linear]
allow_recompute_mask = True
batch_size = 32
input_features = 8
output_features = 8
hidden_features = 32
num_layers = 4
num_dense_steps = 2000
num_sparse_steps = 3000
num_sparse_steps_2 = 1000
checkpoint_path = "part1.chkp"
args = Args()
main(args)
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/test/checkpointing_test_reference.py |
import numpy as np
import time
import sys
# permutation-specifics
sys.path.append("../")
from permutation_search_kernels.permutation_utilities import *
from permutation_search_kernels.exhaustive_search import Exhaustive_Search
from permutation_search_kernels.channel_swap import Channel_Swap
# Arguments
import argparse
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='Test channel permutations')
parser.add_argument('--infile', default='random', type=str, help='input file or "random"')
parser.add_argument('--channels', default=384, type=int, help='random input channel count (C)')
parser.add_argument('--filters', default=96, type=int, help='random input filter count (K)')
parser.add_argument('--verbosity', default=0, type=int, help='print status updates')
parser.add_argument('--seed', default=1, type=int, help='random seed')
parser.add_argument('--pretty_print', default=True, type=str2bool, help='print the table for pretty viewing (as opposed to strict .csv)')
parser.add_argument('--unstructured', default=0.0, type=float, help='perform unstructured pruning to a target sparsity before processing, to emulate an unstructured sparse network. "-1" will find the minimum sparsity required to achieve a perfect permutation')
parser.add_argument('--gpu', default=True, type=str2bool, help='uses a gpu to accelerate the search if possible')
parser.add_argument('--check_permutation', default=False, type=str2bool, help='check that the tracked permutation matches the recovered permutation')
parser.add_argument('--intermediate_steps', default=0, type=int, help='find roughly evenly-spaced permutations in efficacy')
parser.add_argument('--print_permutation', default=False, type=str2bool, help='print the final permutation found by each strategy')
parser.add_argument('strategies', metavar='strategy', type=str, nargs='+', help='strategies to try')
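# Example invocation (illustrative; strategies are positional and comma-delimited):
#   python permutation_test.py --channels 384 --filters 96 \
#       "optimize_stripe_groups,8,100" "channel_swap,0" "random,10"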
## binary search for the minimum sparsity necessary to achieve a perfect permutation with some strategy
def find_minimum_sparsity(matrix, search_function, **kwargs):
duration = 0
min_sparsity = 50
max_sparsity = 100
sparsity = 75
verbosity = 0
if 'verbosity' in kwargs:
verbosity = kwargs['verbosity']
while min_sparsity < max_sparsity:
if verbosity > 5:
print(f"\tlooking now at {sparsity} (between {min_sparsity} and {max_sparsity})")
# prepare unstructured sparse matrix, get row sparsity magnitude
tmp_result = unstructured_prune(matrix, sparsity/100.0)
local_unpruned_magnitude = np.sum(np.abs(tmp_result))
local_unstructured_rows_magnitude = magnitude_after_pruning_rows(tmp_result, rate=0.5)
# quick check to see if this sparsity is trivially too low
if local_unstructured_rows_magnitude*1.0001 < local_unpruned_magnitude:
if verbosity > 5:
print(f"Skipping sparsity {sparsity} since there's no perfect permutation (unstructured mag {local_unpruned_magnitude} is larger than sparse rows {local_unstructured_rows_magnitude}).")
min_sparsity = sparsity+1
sparsity = int(min_sparsity + (max_sparsity - min_sparsity)/2.0)
continue
tmp_result, tmp_duration, found_permutation = search_function(tmp_result, **kwargs)
duration += tmp_duration
nonzeros = np.count_nonzero(tmp_result)
tmp_result = apply_2_to_4(tmp_result)
nonzeros_after_2to4 = np.count_nonzero(tmp_result)
if nonzeros == nonzeros_after_2to4: # found a winner, are we done?
if verbosity > 3:
print(f"Found an unstructured sparsity that we can turn into 2:4: {sparsity}")
max_sparsity = sparsity
if max_sparsity <= min_sparsity and verbosity > 0:
print(f"Found the minimum unstructured sparsity that we can turn into 2:4: {sparsity}")
break
else:
if verbosity > 5:
print(f"Unstructured sparsity {sparsity} was insufficient to produce 2:4 sparsity")
min_sparsity = sparsity+1
if max_sparsity <= min_sparsity and verbosity > 0:
print(f"Found the minimum unstructured sparsity that we can turn into 2:4: {max_sparsity}")
sparsity = max_sparsity
break
sparsity = int(min_sparsity + (max_sparsity - min_sparsity)/2.0)
return sparsity, duration
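# Minimal sketch (standard library only) of the narrowing logic used by
# find_minimum_sparsity above: shrink [lo, hi] until the smallest value that
# satisfies the predicate is found. The real search additionally prunes the
# matrix and runs a permutation search at every probe.
def _binary_search_min(predicate, lo=50, hi=100):
    while lo < hi:
        mid = (lo + hi) // 2
        if predicate(mid):
            hi = mid  # mid works; the answer is mid or something smaller
        else:
            lo = mid + 1  # mid fails; the answer must be larger
    return lo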
# Entry point
if __name__ == "__main__":
args = parser.parse_args()
verbosity = args.verbosity
np.random.seed(seed=args.seed)
use_gpu(initial_override=args.gpu)
# get or create the input matrix
input_vals = np.random.rand(args.filters, args.channels)
if args.infile != "random":
if 'npy' in args.infile:
input_vals = np.load(args.infile, 'r')
shp = input_vals.shape
shp_str = str(shp).replace(",","x")
newshp_str = ''
if len(shp) == 4: # K,C,R,S -> RSK,C
input_vals = np.transpose(input_vals,(2,3,0,1)).flatten().reshape((shp[2]*shp[3]*shp[0], shp[1]))
newshp_str = str(input_vals.shape).replace(",","x")
print(f"{args.infile},{shp_str},{newshp_str}")
if input_vals.shape[1] % 4 != 0:
print(f"Unfriendly shape {input_vals.shape}, not pruning.")
sys.exit()
# unstructured prune if requested
if args.unstructured > 0.0:
args.unstructured = min(args.unstructured, 1.0)
input_vals = unstructured_prune(input_vals, args.unstructured)
print(f"{args.infile} pruned to {args.unstructured*100.:>.1f} sparsity, shape is {input_vals.shape}")
# calculate some early metrics
sorted_magnitudes = np.sort(np.abs(input_vals), axis=None)
unpruned_magnitude = np.sum(sorted_magnitudes)
num_weights = sorted_magnitudes.size
unstructured_magnitude = np.sum(sorted_magnitudes[int(num_weights/2):])
unstructured_rows_magnitude = magnitude_after_pruning_rows(input_vals, rate=0.5)
simple_2to4 = apply_2_to_4(np.copy(input_vals))
simple_2to4_magnitude = sum_after_2_to_4(input_vals)
tmp_time = time.perf_counter()
simple_2to4_magnitude = sum_after_2_to_4(input_vals)
default_duration = time.perf_counter() - tmp_time
best_magnitude = unstructured_rows_magnitude
best_lost_magnitude = unpruned_magnitude - best_magnitude
base_lost_magnitude = unpruned_magnitude - simple_2to4_magnitude
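# Note: the "efficacy" reported below measures how much of the magnitude gap
# between default 2:4 pruning (base_lost_magnitude) and the unstructured
# 50%-per-row ideal (best_lost_magnitude) a strategy recovers: 0 means no better
# than default 2:4, 100 means it matches the ideal. The exact formula comes from
# the efficacy() helper imported from permutation_utilities above.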
# prep results table
final_metric = 'efficacy'
if args.unstructured < 0.0:
final_metric = 'min_sparsity'
if args.pretty_print:
print(f"{'strategy':<35},{'magnitude':>15},{final_metric:>15},{'duration':>15}")
print(f"{'unpruned':<35},{unpruned_magnitude:>15.3f},{'-':^15},{'-':^15}")
print(f"{'unstructured':<35},{unstructured_magnitude:>15.3f},{'-':^15},{'-':^15}")
print(f"{'50% rows':<35},{unstructured_rows_magnitude:>15.3f},{'100.0':>15},{'-':^15}")
print(f"{'default 2:4':<35},{simple_2to4_magnitude:>15.3f},{'0.0':>15},{default_duration:>15.3f}")
else:
print(f"strategy,magnitude,{final_metric},duration")
print(f"unpruned,{unpruned_magnitude},-,-")
print(f"unstructured,{unstructured_magnitude},-,-")
print(f"50%_rows,{unstructured_rows_magnitude},100.0,-")
print(f"2:4,{simple_2to4_magnitude},0.0,{default_duration}")
# try the requested strategies
for i,strategy in enumerate(args.strategies):
result = np.copy(input_vals)
np.random.seed(seed=args.seed)
duration = 0.0
min_sparsity = 0.0
strat_split = strategy.split(",")
found_permutation = None
# optimize stripe groups
if strat_split[0] == 'optimize_stripe_groups':
stripe_group_size_in_cols = 8
if len(strat_split) >= 2:
stripe_group_size_in_cols = int(strat_split[1])
escape_attempts = 100
if len(strat_split) >= 3:
escape_attempts = int(strat_split[2])
if args.unstructured >= 0.0: # just perform the search on the current matrix
result,duration,found_permutation = Exhaustive_Search(result, stripe_group_size=stripe_group_size_in_cols, escape_attempts=escape_attempts)
else: # find the minimum sparsity needed to transparently transform the input
min_sparsity,duration = find_minimum_sparsity(result, Exhaustive_Search, stripe_group_size=stripe_group_size_in_cols, escape_attempts=escape_attempts)
result = unstructured_prune(result, min_sparsity/100.0)
# channel swaps
elif strat_split[0] == 'channel_swap':
escape_attempts = 0
if len(strat_split) >= 2:
escape_attempts = int(strat_split[1])
if args.unstructured >= 0.0: # just perform the search on the current matrix
result,duration,found_permutation = Channel_Swap(result, escape_attempts=escape_attempts, verbosity=verbosity)
else: # find the minimum sparsity needed to transparently transform the input
min_sparsity,duration = find_minimum_sparsity(result, Channel_Swap, escape_attempts=escape_attempts, verbosity=verbosity)
result = unstructured_prune(result, min_sparsity/100.0)
# random permutations
elif strat_split[0] == 'random':
if args.unstructured < 0.0: # searching for minimum sparsity not supported for random permutations
continue
num_perms = 10
if len(strat_split) >= 2 and int(strat_split[1]) >= 1:
num_perms = int(strat_split[1])
# try the seeds/permutations
permutation = [c for c in range(result.shape[1])]
best_sum = sum_after_2_to_4(result)
best_perm = permutation.copy()
start_time = time.perf_counter()
for x in range(num_perms):
permutation = np.random.permutation(permutation)
cur_sum = sum_after_2_to_4(result[:,permutation])
if cur_sum > best_sum:
best_sum = cur_sum
best_perm = permutation.copy()
if verbosity > 0:
print(f"\tnew best permutation {x} found with magnitude {best_sum:>15.3f}")
elif verbosity > 5:
print(f"\tpermutation {x} magnitude too low: {cur_sum:>15.3f}")
duration = time.perf_counter() - start_time
result = result[:,best_perm]
found_permutation = best_perm
else:
print(f"Unknown strategy: {strategy}!")
sys.exit()
# report stats for this strategy
cur_mag = sum_after_2_to_4(result)
cur_eff = efficacy(best_lost_magnitude, base_lost_magnitude, unpruned_magnitude - cur_mag)*100.0
final_metric = cur_eff
if args.unstructured < 0.0:
final_metric = min_sparsity
perm_distance = ""
error = None
if args.check_permutation and found_permutation is not None:
recovered_perm = find_permutation(result, input_vals)
error = False
for c in range(len(recovered_perm)):
if recovered_perm[c] != found_permutation[c]:
if verbosity > 0:
print(f"tracked permutation at index {c} was {found_permutation[c]}, but the recovered permutation thought it was {recovered_perm[c]}")
error = True
# if requested, generate permutations that divide the efficacy space into equal steps
if args.intermediate_steps != 0:
magnitude_targets = None
if args.intermediate_steps != 0:
ratios = [step/float(args.intermediate_steps+1) for step in range(1,args.intermediate_steps+1)]
mag_diff = cur_mag - (unpruned_magnitude - base_lost_magnitude)
magnitude_targets = [(unpruned_magnitude - base_lost_magnitude) + mag_diff * ratio for ratio in ratios]
perm_distance, target_permutations = permutation_distance(found_permutation, [c for c in range(result.shape[1])], matrix=input_vals, magnitude_targets=magnitude_targets, debug=False, verbosity=verbosity)
if target_permutations is not None:
for target_permutation in target_permutations:
print(target_permutation)
error_str = ""
if error is not None:
error_str = ", correct"
if error:
error_str = ", mismatch"
if args.pretty_print:
print(f"{strategy:35},{cur_mag:>15.3f},{final_metric:>15.1f},{duration:>15.3f}{error_str:>15}")
else:
strat_string = strategy.replace(",","_")
print(f"{strat_string},{cur_mag},{final_metric},{duration}{error_str}")
if args.print_permutation and found_permutation is not None:
print(found_permutation)
| GeneSplice-main | GeneSplice/apex/apex/contrib/sparsity/permutation_tests/permutation_test.py |
try:
import torch
import bnp
from .batch_norm import BatchNorm2d_NHWC
del torch
del bnp
del batch_norm
except ImportError as err:
print("apex was installed without --bnp flag, contrib.groupbn is not available")
| GeneSplice-main | GeneSplice/apex/apex/contrib/groupbn/__init__.py |
import torch
import numpy as np
from torch.nn.modules.batchnorm import _BatchNorm
import bnp
class bn_NHWC_impl(torch.autograd.Function):
@staticmethod
def forward(ctx, x, s, b, rm, riv, mini_m, mini_riv, ret_cta, mom, epsilon, fuse_relu, is_train, bn_group, my_data, pair_data, magic, pair_data2, pair_data3, fwd_occup, fwd_grid_x, bwd_occup, bwd_grid_x, multi_stream):
if is_train:
ctx.save_for_backward(x, s, b, rm, riv, mini_m, mini_riv)
ctx.epsilon = epsilon
ctx.momentum = mom
ctx.ret_cta = ret_cta
ctx.fuse_relu = fuse_relu
ctx.my_data = my_data
ctx.pair_data = pair_data
ctx.magic = magic
ctx.pair_data2 = pair_data2
ctx.pair_data3 = pair_data3
ctx.bn_group = bn_group
ctx.bwd_occup = bwd_occup
ctx.bwd_grid_x = bwd_grid_x
ctx.multi_stream = multi_stream
res = bnp.bn_fwd_nhwc(x, s, b, rm, riv, mini_m, mini_riv, ret_cta, mom, epsilon, fuse_relu, my_data, pair_data, pair_data2, pair_data3, bn_group, magic, fwd_occup, fwd_grid_x, multi_stream)
return res
else:
return bnp.bn_fwd_eval_nhwc(x, s, b, rm, riv, ret_cta, bn_group, mom, epsilon, fuse_relu)
@staticmethod
def backward(ctx, grad_y):
x, s, b, rm, riv, mini_m, mini_riv = ctx.saved_variables
epsilon = ctx.epsilon
mom = ctx.momentum
ret_cta = ctx.ret_cta
fuse_relu = ctx.fuse_relu
my_data = ctx.my_data
pair_data = ctx.pair_data
magic = ctx.magic
pair_data2 = ctx.pair_data2
pair_data3 = ctx.pair_data3
bn_group = ctx.bn_group
bwd_occup = ctx.bwd_occup
bwd_grid_x = ctx.bwd_grid_x
multi_stream = ctx.multi_stream
dx, dscale, dbias = bnp.bn_bwd_nhwc(x, grad_y, s, b, rm, riv, mini_m, mini_riv, ret_cta, mom, epsilon, fuse_relu, my_data, pair_data, pair_data2, pair_data3, bn_group, magic, bwd_occup, bwd_grid_x, multi_stream)
return dx, dscale, dbias, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
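# Note: backward() must return one value per argument of forward(); the trailing
# Nones correspond to the inputs that receive no gradient (running stats,
# hyperparameters, peer-memory handles and kernel launch parameters).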
class bn_addrelu_NHWC_impl(torch.autograd.Function):
@staticmethod
def forward(ctx, x, z, s, b, rm, riv, mini_m, mini_riv, grid_dim_y, ret_cta, mom, epsilon, is_train, bn_group, my_data, pair_data, magic, pair_data2, pair_data3, fwd_occup, fwd_grid_x, bwd_occup, bwd_grid_x, multi_stream):
if is_train:
bitmask = torch.cuda.IntTensor(((x.numel()+31)//32) * 2 * grid_dim_y)
ctx.save_for_backward(x, s, b, rm, riv, mini_m, mini_riv, bitmask)
ctx.epsilon = epsilon
ctx.momentum = mom
ctx.ret_cta = ret_cta
ctx.my_data = my_data
ctx.pair_data = pair_data
ctx.magic = magic
ctx.pair_data2 = pair_data2
ctx.pair_data3 = pair_data3
ctx.bn_group = bn_group
ctx.bwd_occup = bwd_occup
ctx.bwd_grid_x = bwd_grid_x
ctx.multi_stream = multi_stream
res = bnp.bn_addrelu_fwd_nhwc(x, z, s, b, rm, riv, mini_m, mini_riv, bitmask, ret_cta, mom, epsilon, my_data, pair_data, pair_data2, pair_data3, bn_group, magic, fwd_occup, fwd_grid_x, multi_stream)
return res
else:
return bnp.bn_addrelu_fwd_eval_nhwc(x, z, s, b, rm, riv, ret_cta, bn_group, mom, epsilon)
@staticmethod
def backward(ctx, grad_y):
x, s, b, rm, riv, mini_m, mini_riv, bitmask = ctx.saved_variables
epsilon = ctx.epsilon
mom = ctx.momentum
ret_cta = ctx.ret_cta
my_data = ctx.my_data
pair_data = ctx.pair_data
magic = ctx.magic
pair_data2 = ctx.pair_data2
pair_data3 = ctx.pair_data3
bn_group = ctx.bn_group
bwd_occup = ctx.bwd_occup
bwd_grid_x = ctx.bwd_grid_x
multi_stream = ctx.multi_stream
dx, dz, dscale, dbias = bnp.bn_addrelu_bwd_nhwc(x, grad_y, s, b, rm, riv, mini_m, mini_riv, bitmask, ret_cta, mom, epsilon, my_data, pair_data, pair_data2, pair_data3, bn_group, magic, bwd_occup, bwd_grid_x, multi_stream)
return dx, dz, dscale, dbias, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class BatchNorm2d_NHWC(_BatchNorm):
# if using BatchNorm2d_NHWC simultaneously with multiple streams set multi_stream to True
def __init__(self, num_features, fuse_relu=False, bn_group=1, max_cta_per_sm=2, cta_launch_margin=12, multi_stream=False):
super(BatchNorm2d_NHWC, self).__init__(num_features)
self.fuse_relu = fuse_relu
self.multi_stream = multi_stream
self.minibatch_mean = torch.cuda.FloatTensor(num_features)
self.minibatch_riv = torch.cuda.FloatTensor(num_features)
# default to distributed bn disabled
self.bn_group = bn_group
self.max_cta_per_sm = max_cta_per_sm #used only in training fwd and bwd
self.cta_launch_margin = cta_launch_margin #used only in training fwd and bwd
self.my_data = None
self.pair_data = None
self.pair_data2 = None
self.pair_data3 = None
self.local_rank = 0
self.magic = torch.IntTensor([0])
#calculate cta per sm occupancies
assert(max_cta_per_sm>0) # won't be able to do much with 0 CTAs :)
self.fwd_occupancy = min(bnp.bn_fwd_nhwc_occupancy(), max_cta_per_sm)
self.bwd_occupancy = min(bnp.bn_bwd_nhwc_occupancy(), max_cta_per_sm)
self.addrelu_fwd_occupancy = min(bnp.bn_addrelu_fwd_nhwc_occupancy(), max_cta_per_sm)
self.addrelu_bwd_occupancy = min(bnp.bn_addrelu_bwd_nhwc_occupancy(), max_cta_per_sm)
# calculate grid dimensions based on occupancy numbers
mp_count = torch.cuda.get_device_properties(None).multi_processor_count
self.fwd_grid_dim_x = max(mp_count*self.fwd_occupancy - cta_launch_margin , 1)
self.bwd_grid_dim_x = max(mp_count*self.bwd_occupancy - cta_launch_margin , 1)
self.addrelu_fwd_grid_dim_x = max(mp_count*self.addrelu_fwd_occupancy - cta_launch_margin , 1)
self.addrelu_bwd_grid_dim_x = max(mp_count*self.addrelu_bwd_occupancy - cta_launch_margin , 1)
self.grid_dim_y = (num_features + 63) // 64
# allocate scratch space used by implementation
# TODO: this scratch space is not meant to be exposed to user code. Only one-time initialization is needed,
# and the same buffer could be reused across iterations. It is currently allocated here, instead of requesting
# a new buffer from the caching allocator, to avoid unnecessary re-initialization on every iteration.
self.ret_cta = torch.cuda.ByteTensor(8192).fill_(0)
#FIXME: turn pair handles into an array
if bn_group>1:
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
assert(world_size >= bn_group)
assert(world_size % bn_group == 0)
bn_sync_steps = 1
if (bn_group==4):
bn_sync_steps = 2
if (bn_group==8):
bn_sync_steps = 3
self.ipc_buffer = torch.cuda.ByteTensor(bnp.get_buffer_size(bn_sync_steps))
self.my_data = bnp.get_data_ptr(self.ipc_buffer)
# we are walking on very thin ice here by utilizing internal `_share_cuda_()`
self.storage = self.ipc_buffer.storage()
self.share_cuda = self.storage._share_cuda_()
internal_cuda_mem = self.share_cuda
# internal_cuda_mem[1]: ipc_mem_handle
my_handle = torch.cuda.ByteTensor(np.frombuffer(internal_cuda_mem[1], dtype=np.uint8))
# internal_cuda_mem[3]: offset
my_offset = torch.cuda.IntTensor([internal_cuda_mem[3]])
handles_all = torch.empty(world_size, my_handle.size(0), dtype=my_handle.dtype, device=my_handle.device)
handles_l = list(handles_all.unbind(0))
torch.distributed.all_gather(handles_l, my_handle)
offsets_all = torch.empty(world_size, my_offset.size(0), dtype=my_offset.dtype, device=my_offset.device)
offsets_l = list(offsets_all.unbind(0))
torch.distributed.all_gather(offsets_l, my_offset)
#whom do I actually care about? that would be local_rank XOR 1
self.pair_handle = handles_l[local_rank ^ 1].cpu().contiguous()
pair_offset = offsets_l[local_rank ^ 1].cpu()
self.pair_data = bnp.get_remote_data_ptr(self.pair_handle, pair_offset)
if bn_group>2:
self.pair_handle2 = handles_l[local_rank ^ 2].cpu().contiguous()
pair_offset2 = offsets_l[local_rank ^ 2].cpu()
self.pair_data2 = bnp.get_remote_data_ptr(self.pair_handle2, pair_offset2)
if bn_group>4:
self.pair_handle3 = handles_l[local_rank ^ 4].cpu().contiguous()
pair_offset3 = offsets_l[local_rank ^ 4].cpu()
self.pair_data3 = bnp.get_remote_data_ptr(self.pair_handle3, pair_offset3)
#FIXME: get magic value into C code and eliminate from here
self.magic = torch.IntTensor([2])
self.local_rank = local_rank
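# Note: the exchange above forms a butterfly-style pairing -- every rank shares
# buffers with local_rank ^ 1, plus local_rank ^ 2 when bn_group > 2 and
# local_rank ^ 4 when bn_group > 4 -- matching the bn_sync_steps count computed
# above (log2 of the group size).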
def forward(self, x, z=None):
if z is not None:
assert(self.fuse_relu==True)
return bn_addrelu_NHWC_impl.apply(x, z,
self.weight, self.bias,
self.running_mean, self.running_var,
self.minibatch_mean, self.minibatch_riv, self.grid_dim_y, self.ret_cta,
self.momentum,
self.eps, self.training, self.bn_group, self.my_data, self.pair_data, (self.magic), self.pair_data2, self.pair_data3,
self.addrelu_fwd_occupancy, self.addrelu_fwd_grid_dim_x,
self.addrelu_bwd_occupancy, self.addrelu_bwd_grid_dim_x,
self.multi_stream)
else:
return bn_NHWC_impl.apply(x,
self.weight, self.bias,
self.running_mean, self.running_var,
self.minibatch_mean, self.minibatch_riv, self.ret_cta,
self.momentum,
self.eps, self.fuse_relu, self.training, self.bn_group, self.my_data, self.pair_data, (self.magic), self.pair_data2, self.pair_data3,
self.fwd_occupancy, self.fwd_grid_dim_x,
self.bwd_occupancy, self.bwd_grid_dim_x,
self.multi_stream)
def __del__(self):
if self.bn_group>1:
bnp.close_remote_data(self.pair_handle)
if self.bn_group>2:
bnp.close_remote_data(self.pair_handle2)
if self.bn_group>4:
bnp.close_remote_data(self.pair_handle3)
| GeneSplice-main | GeneSplice/apex/apex/contrib/groupbn/batch_norm.py |
from .batch_norm import GroupBatchNorm2d | GeneSplice-main | GeneSplice/apex/apex/contrib/cudnn_gbn/__init__.py |
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
from torch import Tensor
import peer_memory_cuda as pm
import cudnn_gbn_lib
from torch.cuda.amp import custom_fwd, custom_bwd
class _GroupBatchNorm2d(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, input, weight, bias, running_mean, running_variance,
minibatch_mean, minibatch_inv_var, momentum, eps, group_size, group_rank, fwd_buffers, bwd_buffers):
ctx.save_for_backward(input, weight, minibatch_mean, minibatch_inv_var)
ctx.eps = eps
ctx.bn_group = group_size
ctx.rank_id = group_rank
ctx.peer_buffers = bwd_buffers
return cudnn_gbn_lib.forward(input, weight, bias, running_mean, running_variance,
minibatch_mean, minibatch_inv_var, momentum, eps, group_size, group_rank, fwd_buffers)
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
x, scale, minibatch_mean, minibatch_inv_var = ctx.saved_variables
eps = ctx.eps
bn_group = ctx.bn_group
rank_id = ctx.rank_id
peer_buffers = ctx.peer_buffers
dx, dscale, dbias = cudnn_gbn_lib.backward(x,
grad_output,
scale,
minibatch_mean,
minibatch_inv_var,
eps,
bn_group,
rank_id,
peer_buffers)
return dx, dscale, dbias, None, None, None, None, None, None, None, None, None, None
class GroupBatchNorm2d(_BatchNorm):
"""
Synchronized batch normalization module, extended from ``torch.nn.modules.batchnorm._BatchNorm``,
with stats reduction across multiple processes.
When running in training mode, the layer reduces stats across process groups
to increase the effective batch size for the normalization layer. This is useful
in applications where the per-process batch size is small and would otherwise
diminish the converged accuracy of the model.
When running in evaluation mode, the layer falls back to
``torch.nn.functional.batch_norm``.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
group_size: number of consecutive ranks whose statistics are reduced
together (ranks are grouped as ``rank // group_size``)
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Example::
>>> # requires an initialized torch.distributed process group
>>> gbn = apex.contrib.cudnn_gbn.GroupBatchNorm2d(128, group_size=2).cuda()
>>> inp = torch.randn(10, 128, 14, 14, device="cuda").half().to(memory_format=torch.channels_last)
>>> out = gbn(inp)
"""
def __init__(self, num_features, group_size, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
super(GroupBatchNorm2d, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
self.group_size = group_size
rank = torch.distributed.get_rank()
self.group_id = rank // group_size
self.group_rank = rank % group_size
self.fwd_peer_buffers = self.get_peer_buffers(num_features)
self.bwd_peer_buffers = self.get_peer_buffers(num_features)
self.minibatch_mean = torch.cuda.FloatTensor(num_features)
self.minibatch_inv_var = torch.cuda.FloatTensor(num_features)
def get_peer_buffers(self, num_features):
# group_size * 2 (low-latency algo) * 2 (mean+var) * channels * 4 (float32)
peer_size = self.group_size * 4 * num_features * 4
raw = pm.allocate_raw(peer_size)
# exchange peer pointers with nccl
world_size = torch.distributed.get_world_size()
raw_ipc = pm.get_raw_ipc_address(raw).cuda()
raw_ipcs = [torch.empty_like(raw_ipc) for _ in range(world_size)]
torch.distributed.all_gather(raw_ipcs, raw_ipc)
group_ipcs = [raw_ipcs[x] for x in range(self.group_id * self.group_size, (self.group_id * self.group_size) + self.group_size)]
peer_raw_ipcs = torch.stack(group_ipcs).cpu()
return pm.get_raw_peers(peer_raw_ipcs, self.group_rank, raw)
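# Worked example of the sizing above (illustrative): with group_size=2 and
# num_features=1024 the raw allocation is 2 * 4 * 1024 * 4 = 32768 bytes --
# 2 ranks, times 2 stats (mean and variance) doubled for the low-latency
# algorithm, times 1024 channels, times 4 bytes per float32 value.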
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError(
"expected 4D input (got {}D input)".format(input.dim())
)
def _check_input_channels(self, input):
if input.size(1) % 8 != 0:
raise ValueError(
"GroupBatchNorm2d number of input channels should be a multiple of 8"
)
def forward(self, input : Tensor) -> Tensor:
# currently only GPU input is supported
if not input.is_cuda:
raise ValueError("GroupBatchNorm2d expected input tensor to be on GPU")
if not input.is_contiguous(memory_format=torch.channels_last):
raise ValueError("GroupBatchNorm2d expected input tensor to be in channels last memory format")
if torch.is_autocast_enabled():
input = input.to(torch.get_autocast_gpu_dtype())
if input.dtype != torch.float16:
raise ValueError("GroupBatchNorm2d expected input tensor in float16")
self._check_input_dim(input)
self._check_input_channels(input)
if not self.training:
# fall back to pytorch implementation for inference
return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, self.momentum, self.eps)
return _GroupBatchNorm2d.apply(input,
self.weight, self.bias,
self.running_mean, self.running_var,
self.minibatch_mean, self.minibatch_inv_var,
self.momentum,
self.eps,
self.group_size,
self.group_rank,
self.fwd_peer_buffers,
self.bwd_peer_buffers)
| GeneSplice-main | GeneSplice/apex/apex/contrib/cudnn_gbn/batch_norm.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/__init__.py |
|
GeneSplice-main | GeneSplice/apex/apex/contrib/test/index_mul_2d/__init__.py |
|
import random
import unittest
import torch
HAS_INDEX_MUL_2D_RELU = None
try:
from apex.contrib.index_mul_2d import index_mul_2d
except ImportError as e:
HAS_INDEX_MUL_2D_RELU = False
else:
HAS_INDEX_MUL_2D_RELU = True
@unittest.skipIf(not HAS_INDEX_MUL_2D_RELU, "`apex.contrib.index_mul_2d` is not found.")
class IndexMul2dTest(unittest.TestCase):
def setUp(self, seed=0):
torch.manual_seed(seed)
self.input1_size = random.randint(1, 1000)
self.input2_size = random.randint(1, 100000)
self.feature_size = random.randint(1, 256)
self.input1_float = torch.randn(size=(self.input1_size, self.feature_size),).cuda()
self.input2_float = torch.randn(size=(self.input2_size, self.feature_size),).cuda()
self.index1 = torch.randint(low=0, high=self.input1_size, size=(self.input2_size,)).cuda()
self.input1_float_ = self.input1_float.clone()
self.input2_float_ = self.input2_float.clone()
self.input1_float.requires_grad_()
self.input1_float_.requires_grad_()
self.input2_float.requires_grad_()
self.input2_float_.requires_grad_()
self.input1_half = torch.randn(size=(self.input1_size, self.feature_size),).cuda().half()
self.input2_half = torch.randn(size=(self.input2_size, self.feature_size),).cuda().half()
self.input1_half_ = self.input1_half.clone()
self.input2_half_ = self.input2_half.clone()
self.input1_half.requires_grad_()
self.input2_half.requires_grad_()
self.input1_half_.requires_grad_()
self.input2_half_.requires_grad_()
def test_index_mul_float(self):
out = index_mul_2d(self.input1_float, self.input2_float, self.index1)
energy = (out.float()**2).sum() / out.numel()
force = torch.autograd.grad(
energy,
self.input1_float,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
loss = (out.float()**2).sum() / out.numel() + (force.float()**2).sum()
loss.backward()
out_ = self.input1_float_[self.index1] * self.input2_float_
energy_ = (out_.float()**2).sum() / out.numel()
force_ = torch.autograd.grad(
energy_,
self.input1_float_,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
loss = (out_.float()**2).sum() / out_.numel() + (force_.float()**2).sum()
loss.backward()
self.assertTrue(torch.allclose(self.input1_float, self.input1_float_, atol=1e-3, rtol=1e-3, equal_nan=True))
self.assertTrue(torch.allclose(self.input2_float, self.input2_float_, atol=1e-3, rtol=1e-3, equal_nan=True))
self.assertTrue(torch.allclose(self.input1_float.grad, self.input1_float_.grad, atol=1e-3, rtol=1e-3, equal_nan=True))
self.assertTrue(torch.allclose(self.input2_float.grad, self.input2_float_.grad, atol=1e-3, rtol=1e-3, equal_nan=True))
def test_index_mul_half(self):
out = index_mul_2d(self.input1_half, self.input2_half, self.index1)
energy = (out.float()**2).sum() / out.numel()
force = torch.autograd.grad(
energy,
self.input1_half,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
loss = (out.float()**2).sum() / out.numel() + (force.float()**2).sum()
loss.backward()
out_ = self.input1_half_[self.index1] * self.input2_half_
energy_ = (out_.float()**2).sum() / out.numel()
force_ = torch.autograd.grad(
energy_,
self.input1_half_,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
loss = (out_.float()**2).sum() / out_.numel() + (force_.float()**2).sum()
loss.backward()
self.assertTrue(torch.allclose(self.input1_half, self.input1_half_, atol=1e-3, rtol=1e-3, equal_nan=True))
self.assertTrue(torch.allclose(self.input2_half, self.input2_half_, atol=1e-3, rtol=1e-3, equal_nan=True))
self.assertTrue(torch.allclose(self.input1_half.grad, self.input1_half_.grad, atol=1e-3, rtol=1e-3, equal_nan=True))
self.assertTrue(torch.allclose(self.input2_half.grad, self.input2_half_.grad, atol=1e-3, rtol=1e-3, equal_nan=True))
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/index_mul_2d/test_index_mul_2d.py |
import copy
import typing
import unittest
import torch
import torch.nn as nn
from torch.testing._internal import common_utils
SKIP_TEST = None
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
try:
from apex.contrib.cudnn_gbn import GroupBatchNorm2d as GBN
except ImportError as e:
SKIP_TEST = e
# Usage: python /path/to/cudnn_gbn/test_cudnn_gbn_with_two_gpus.py
input_shapes = [
[1, 1024, 48, 72],
[1, 128, 192, 288],
[1, 128, 384, 576],
[1, 1536, 48, 72],
[1, 2048, 48, 72],
[1, 256, 1, 1],
[1, 256, 192, 288],
[1, 256, 384, 576],
[1, 256, 48, 72],
[1, 256, 96, 144],
[1, 32, 384, 576],
[1, 48, 192, 288],
[1, 64, 384, 576],
[1, 728, 48, 72],
[1, 728, 96, 144],
]
class BNModelRef(nn.Module):
def __init__(self, num_features, num_layers=1000):
super().__init__()
self.fwd = nn.Sequential(
*[
nn.BatchNorm2d(num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
for _ in range(num_layers)
]
)
def forward(self, x):
return self.fwd(x)
class BNModel(nn.Module):
def __init__(self, num_features, num_layers=1000):
super().__init__()
self.fwd = nn.Sequential(
*[
GBN(num_features, group_size=2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
for _ in range(num_layers)
]
)
def forward(self, x):
return self.fwd(x)
def get_rand_tensors(global_shape, device):
inp_t = torch.rand(global_shape, dtype=torch.float32, device=device).to(memory_format=torch.channels_last)
weight = torch.rand(global_shape[1], dtype=torch.float32, device=device)
bias = torch.rand(global_shape[1], dtype=torch.float32, device=device)
_grad_out = torch.rand(global_shape, dtype=torch.float32, device=device).to(memory_format=torch.channels_last)
return inp_t, weight, bias, _grad_out
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TestCudnnGBN(NcclDistributedTestBase):
def _prep(self):
torch.cuda.manual_seed(333)
torch.manual_seed(333)
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 2)
@torch.backends.cudnn.flags(enabled=True, benchmark=True)
def _test_cudnn_gbn(
self,
num_layers: int,
shape: typing.List[int],
*,
memory_format: torch.memory_format = torch.channels_last,
) -> None:
global_shape = copy.deepcopy(shape)
global_shape[0] = self.world_size
device = torch.device("cuda", self.rank)
cudnn_gbn_model = BNModel(
num_features=shape[1],
num_layers=num_layers,
).to(device=device, memory_format=memory_format)
ref_model = BNModelRef(
num_features=shape[1],
num_layers=num_layers,
).to(device=device, memory_format=memory_format)
input, weight, bias, grad_out = get_rand_tensors(global_shape, device)
with torch.no_grad():
ref_model.fwd[0].weight.copy_(weight)
ref_model.fwd[0].bias.copy_(bias)
cudnn_gbn_model.fwd[0].weight.copy_(weight)
cudnn_gbn_model.fwd[0].bias.copy_(bias)
ref_input = input.clone().detach().requires_grad_()
input = input[self.rank : self.rank + 1, ...].clone().detach().requires_grad_()
ref_grad_out = grad_out.half().clone().detach()
grad_out = grad_out[self.rank : self.rank + 1, ...].half().clone().detach()
with torch.cuda.amp.autocast():
out = cudnn_gbn_model(input)
ref_out = ref_model(ref_input.half())
out.backward(grad_out)
ref_out.backward(ref_grad_out)
kwargs = {"rtol": 3.5e-3, "atol": 3e-2, "msg": f"shape: {shape}"}
torch.testing.assert_close(ref_out[self.rank : self.rank + 1], out, **kwargs)
torch.testing.assert_close(ref_input.grad[self.rank : self.rank + 1], input.grad, **kwargs)
# compensating the averaging over processes done by DDP
# in order to produce mathematically equivalent result
# https://github.com/NVIDIA/apex/issues/134#issuecomment-458307368
torch.testing.assert_close(
ref_model.fwd[0].weight.grad / self.world_size, cudnn_gbn_model.fwd[0].weight.grad, **kwargs
)
torch.testing.assert_close(
ref_model.fwd[0].bias.grad / self.world_size, cudnn_gbn_model.fwd[0].bias.grad, **kwargs
)
def test_cudnngbn(self):
if self.world_size != 2:
self.skipTest(f"This test is written for world_size of 2 but {self.world_size}")
for shape in input_shapes:
self._prep()
self._test_cudnn_gbn(1, shape)
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/cudnn_gbn/test_cudnn_gbn_with_two_gpus.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/cudnn_gbn/__init__.py |
|
import unittest
import torch
import torch.nn.functional as F
reference_available = True
try:
from torchvision.ops.focal_loss import sigmoid_focal_loss
except ImportError:
reference_available = False
SKIP_TEST = None
try:
from apex.contrib.focal_loss import focal_loss
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
@unittest.skipIf(not reference_available, "Reference implementation `torchvision.ops.focal_loss.sigmoid_focal_loss` is not available.")
class FocalLossTest(unittest.TestCase):
N_SAMPLES = 12
N_CLASSES = 8
ALPHA = 0.24
GAMMA = 2.0
REDUCTION = "sum"
def test_focal_loss(self) -> None:
if not reference_available:
self.skipTest("This test needs `torchvision` for `torchvision.ops.focal_loss.sigmoid_focal_loss`.")
else:
x = torch.randn(FocalLossTest.N_SAMPLES, FocalLossTest.N_CLASSES).cuda()
with torch.no_grad():
x_expected = x.clone()
x_actual = x.clone()
x_expected.requires_grad_()
x_actual.requires_grad_()
classes = torch.randint(0, FocalLossTest.N_CLASSES, (FocalLossTest.N_SAMPLES,)).cuda()
with torch.no_grad():
y = F.one_hot(classes, FocalLossTest.N_CLASSES).float()
expected = sigmoid_focal_loss(
x_expected,
y,
alpha=FocalLossTest.ALPHA,
gamma=FocalLossTest.GAMMA,
reduction=FocalLossTest.REDUCTION,
)
actual = sum([focal_loss.FocalLoss.apply(
x_actual[i:i+1],
classes[i:i+1].long(),
torch.ones([], device="cuda"),
FocalLossTest.N_CLASSES,
FocalLossTest.ALPHA,
FocalLossTest.GAMMA,
0.0,
) for i in range(FocalLossTest.N_SAMPLES)])
# forward parity
torch.testing.assert_close(expected, actual)
expected.backward()
actual.backward()
# grad parity
torch.testing.assert_close(x_expected.grad, x_actual.grad)
if __name__ == "__main__":
torch.manual_seed(42)
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/focal_loss/test_focal_loss.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/focal_loss/__init__.py |
|
GeneSplice-main | GeneSplice/apex/apex/contrib/test/xentropy/__init__.py |
|
import unittest
import random
import time
import numpy as np
import torch
SKIP_TEST = None
try:
from apex.contrib import xentropy as label_smoothing
except ImportError as e:
SKIP_TEST = e
def label_smoothing_raw(x, target, padding_idx, smoothing):
logprobs = torch.nn.functional.log_softmax(x, dim=-1, dtype=torch.float32)
non_pad_mask = (target != padding_idx)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)[non_pad_mask]
smooth_loss = -logprobs.mean(dim=-1)[non_pad_mask]
loss = (1.0 - smoothing) * nll_loss + smoothing * smooth_loss
return loss
def label_smoothing_opt_1(x, target, padding_idx, smoothing):
logprobs = torch.nn.functional.log_softmax(x, dim=-1, dtype=torch.float32)
pad_mask = (target == padding_idx)
ll_loss = logprobs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
smooth_loss = logprobs.mean(dim=-1)
loss = (smoothing - 1.0) * ll_loss - smoothing * smooth_loss
loss.masked_fill_(pad_mask, 0)
return loss
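# Illustrative cross-check, not used by the unit test below: the two reference
# implementations above should agree wherever the target is not padding.
# label_smoothing_raw returns only the non-padded losses, while
# label_smoothing_opt_1 returns a full-length vector with zeros at padded positions.
def _references_agree(n=8, vocab=16, padding_idx=0, smoothing=0.1, device="cuda"):
    x = torch.randn(n, vocab, device=device)
    target = torch.randint(0, vocab, (n,), device=device)
    raw = label_smoothing_raw(x, target, padding_idx, smoothing)
    opt = label_smoothing_opt_1(x, target, padding_idx, smoothing)
    return torch.allclose(raw, opt[target != padding_idx])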
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class LabelSmoothingTest(unittest.TestCase):
def setUp(self, seed=1234):
super().setUp()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Set pytorch print precision
torch.set_printoptions(precision=10)
def gen_test_inputs(self, N, T, H, smoothing, padding_idx):
logits = torch.randn((N*T, H), dtype=torch.half, device='cuda',
requires_grad=True)
labels = torch.randint(0, H, [N*T], device='cuda')
for i in random.sample(range(N*T), N*T//6):
labels[i] = padding_idx
half_to_float = (logits.dtype == torch.half)
return logits, labels, half_to_float
def print_max_diff_elem(self, ref, tst):
ref, tst = ref.flatten(), tst.flatten()
diff = (ref - tst).abs().max()
idx = (ref - tst).abs().argmax()
print("Max atol idx: {}, diff: {:.6f}, ref: {:.6f}, tst: {:.6f}".format(
idx, diff, ref[idx], tst[idx]))
def test_label_smoothing_function(self):
# Set label smoothing configuration
smoothing, padding_idx = 0.1, 0
N, T, H = 128, 74, 32320
iters = 10
loss_func = label_smoothing.SoftmaxCrossEntropyLoss.apply
for i in range(iters):
logits, labels, half_to_float = self.gen_test_inputs(
N, T, H, smoothing, padding_idx)
# Run original softmax cross entropy with label smoothing
logits.grad = None
losses = label_smoothing_raw(logits, labels, padding_idx, smoothing)
loss = losses.sum()
loss.backward()
ref_loss = loss.clone().detach()
ref_grad = logits.grad.clone().detach()
# Run optimized softmax cross entropy with label smoothing
logits.grad = None
losses = loss_func(logits, labels, smoothing, padding_idx, half_to_float)
loss = losses.sum()
loss.backward()
val_loss = loss.clone().detach()
val_grad = logits.grad.clone().detach()
# Validate
self.print_max_diff_elem(ref_grad, val_grad)
self.assertTrue(torch.allclose(ref_loss, val_loss, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(ref_grad, val_grad, atol=1e-5, rtol=1e-5))
def test_label_smoothing_perf(self):
# Set label smoothing configuration
smoothing, padding_idx = 0.1, 0
N, T, H = 128, 74, 32320
iters = 1000
loss_func = label_smoothing.SoftmaxCrossEntropyLoss.apply
print()
logits, labels, half_to_float = self.gen_test_inputs(
N, T, H, smoothing, padding_idx)
# Run original softmax cross entropy with label smoothing
torch.cuda.synchronize()
ts = time.time()
for i in range(iters):
logits.grad = None
losses = label_smoothing_raw(logits, labels, padding_idx, smoothing)
loss = losses.sum() / N
loss.backward()
torch.cuda.synchronize()
print("Raw time {:.2f} s elapsed for {} iterations, norm {:.4f}".format(
time.time() - ts, iters, logits.grad.norm()))
# Run optimized softmax cross entropy with label smoothing
torch.cuda.synchronize()
ts = time.time()
for i in range(iters):
logits.grad = None
losses = loss_func(logits, labels, smoothing, padding_idx, half_to_float)
loss = losses.sum() / N
loss.backward()
torch.cuda.synchronize()
print("Opt time {:.2f} s elapsed for {} iterations, norm {:.4f}".format(
time.time() - ts, iters, logits.grad.norm()))
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/xentropy/test_label_smoothing.py |
import unittest
import os
import torch
from torch.testing._internal import common_utils
from torch.testing._internal.common_device_type import instantiate_device_type_tests
SKIP_TEST = None
try:
from apex import fused_dense
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class FusedDenseTest(common_utils.TestCase):
def _test_fused_dense(self, dtype, seed=0):
os.environ["TORCH_ALLOW_TF32_CUBLAS_OVERRIDE"] = "0"
torch.manual_seed(seed)
seq_length = 512
sequences = 3
hidden_dim = 1024
ref_inputs = torch.randn(sequences*seq_length, hidden_dim,
dtype=dtype, device=torch.device("cuda")).requires_grad_(True)
tst_inputs = ref_inputs.clone().detach().requires_grad_(True)
dense = fused_dense.FusedDense(1024, 3072)
dense.to(dtype=dtype)
dense.cuda()
y_tst = dense(tst_inputs)
y_ref = torch.matmul(ref_inputs, dense.weight.t())+dense.bias
dy = torch.randn_like(y_tst).to(dtype=dtype)
y_tst.backward(dy)
dw_ref = torch.matmul(dy.t(), ref_inputs)
dx_ref = torch.matmul(dy, dense.weight.clone())
db_ref = dy.sum(0, False)
torch.testing.assert_close(
ref_inputs, tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(
y_ref, y_tst, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(
dw_ref, dense.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(
dx_ref, tst_inputs.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(
db_ref, dense.bias.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
@common_utils.parametrize("dtype", [torch.half, torch.float, torch.bfloat16])
def test_fused_dense(self, dtype):
self._test_fused_dense(dtype)
instantiate_device_type_tests(FusedDenseTest, globals(), only_for=("cuda",))
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/fused_dense/test_fused_dense.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/layer_norm/__init__.py |
|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.layer_norm.layer_norm import FastLayerNorm
import fast_layer_norm as fln
except ImportError as e:
SKIP_TEST = e
class GPUTimer:
def __init__(self, stream):
self.start_ = torch.cuda.Event(enable_timing=True)
self.stop_ = torch.cuda.Event(enable_timing=True)
self.stream_ = stream
def start(self):
self.stream_.record_event(self.start_)
def stop(self):
self.stream_.record_event(self.stop_)
def sync(self):
self.stream_.synchronize()
def millis(self):
return self.start_.elapsed_time(self.stop_)
def size_in_bytes(t):
return torch.numel(t) * t.element_size()
def metrics(y_ref, y, epsilon=1e-6):
y_ref = y_ref.float()
y = y.float()
relerr, mse = (
(y_ref - y).abs().sum() / (y_ref.abs().sum() + epsilon),
(y_ref - y).square().mean(),
)
return relerr.item(), mse.item()
device = torch.device("cuda")
fp32 = torch.float32
fp16 = torch.float16
bf16 = torch.bfloat16
def backward_(dz, x, mu, rs, gamma):
wtype = gamma.dtype
itype = x.dtype
otype = dz.dtype
ctype = mu.dtype
mu = mu.unsqueeze(1)
rs = rs.unsqueeze(1)
hidden_size = gamma.numel()
y = rs * (x.to(ctype) - mu)
dbeta = dz.view(-1, hidden_size).sum(0, dtype=ctype)
dgamma = (dz * y).view(-1, hidden_size).sum(0, dtype=ctype)
dy = dz.view(-1, hidden_size).to(ctype) * gamma.unsqueeze(0).to(ctype)
mdy = dy.mean(1, keepdim=True, dtype=ctype)
mdyy = (dy * y).mean(1, keepdim=True, dtype=ctype)
dx = rs * (dy - mdyy * y - mdy)
return dx.to(itype), dgamma.to(wtype), dbeta.to(wtype)
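# Illustrative cross-check, not used by the tests below: the closed form in
# backward_ can be compared against torch.autograd and F.layer_norm on small
# float32 inputs.
def _autograd_reference(dz, x, gamma, beta, epsilon=1e-5):
    x = x.detach().clone().float().requires_grad_(True)
    gamma = gamma.detach().clone().float().requires_grad_(True)
    beta = beta.detach().clone().float().requires_grad_(True)
    z = torch.nn.functional.layer_norm(x, (gamma.numel(),), gamma, beta, epsilon)
    z.backward(dz.float())
    return x.grad, gamma.grad, beta.grad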
def benchmark_(S, B, hidden_size, itype, wtype, runs=100):
epsilon = 1e-5
x = torch.randn((S * B, hidden_size), dtype=itype, device=device)
beta = torch.randn(hidden_size, dtype=wtype, device=device)
gamma = torch.randn(hidden_size, dtype=wtype, device=device)
dz = torch.randn(x.shape, dtype=wtype, device=device)
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
timer = GPUTimer(stream)
# warmup
for r in range(runs):
z, mu, rsigma = fln.ln_fwd(x, gamma, beta, epsilon)
timer.start()
for r in range(runs):
z, mu, rsigma = fln.ln_fwd(x, gamma, beta, epsilon)
timer.stop()
timer.sync()
total_bytes_fwd = sum([size_in_bytes(t) for t in [x, z, gamma, beta, mu, rsigma]])
ms_fwd = timer.millis() / runs
print(
"[FWD] Time: {:.4f}ms Throughput: {:.4f} GB/sec".format(
ms_fwd, total_bytes_fwd * 1e-6 / ms_fwd
)
)
timer.start()
for r in range(runs):
dx, dgamma, dbeta, dbp, dgp = fln.ln_bwd(dz, x, mu, rsigma, gamma)
timer.stop()
timer.sync()
total_bytes_bwd = sum(
[
size_in_bytes(t)
for t in [dz, x, mu, rsigma, gamma, dx, dgamma, dbeta, dbp, dbp, dgp, dgp]
]
)
ms_bwd = timer.millis() / runs
print(
"[BWD] Time: {:.4f}ms Throughput: {:.4f} GB/sec".format(
ms_bwd, total_bytes_bwd * 1e-6 / ms_bwd
)
)
def _test_impl(S, B, hidden_size, itype, wtype, ctype=fp32):
seed = 1243
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
otype = wtype
print("========================================================")
print(f"S={S} B={B} Hidden={hidden_size} {itype} {wtype}")
print("--------------------------------------------------------")
x = torch.randn(S * B, hidden_size, dtype=itype, device=device)
gamma = torch.randn(hidden_size, dtype=wtype, device=device) * 0.2
beta = torch.randn(hidden_size, dtype=wtype, device=device) * 0.2
epsilon = 1e-5
x.requires_grad = True
gamma.requires_grad = True
beta.requires_grad = True
mu_ref = x.mean(1, dtype=ctype, keepdim=True)
v = torch.square(x - mu_ref).mean(1, dtype=ctype, keepdim=True)
rs_ref = torch.rsqrt(v + epsilon)
y_ref = rs_ref * (x.to(ctype) - mu_ref)
z_ref = (gamma.unsqueeze(0) * (y_ref).to(otype) + beta.unsqueeze(0)).to(otype)
mu_ref = mu_ref.flatten()
rs_ref = rs_ref.flatten()
dz = torch.randn_like(z_ref)
# z_ref.backward(dz)
# dx_ref = x.grad
# dgamma_ref = gamma.grad
# dbeta_ref = beta.grad
dx_ref, dg_ref, db_ref = backward_(dz, x, mu_ref, rs_ref, gamma)
z, mu, rs = fln.ln_fwd(x, gamma, beta, epsilon)
dx, dg, db, dg_part, db_part = fln.ln_bwd(dz, x, mu, rs, gamma)
re_z, mse_z = metrics(z_ref, z)
re_mu, mse_mu = metrics(mu_ref, mu)
re_rs, mse_rs = metrics(rs_ref, rs)
re_dx, mse_dx = metrics(dx_ref, dx)
re_dg, mse_dg = metrics(dg_ref, dg)
re_db, mse_db = metrics(db_ref, db)
print(f" z: relerr={re_z :.4e} mse={mse_z :.4e}")
print(f"mu: relerr={re_mu:.4e} mse={mse_mu:.4e}")
print(f"rs: relerr={re_mu:.4e} mse={mse_mu:.4e}")
print(f"dx: relerr={re_dx:.4e} mse={mse_dx:.4e}")
print(f"dg: relerr={re_dg:.4e} mse={mse_dg:.4e}")
print(f"db: relerr={re_db:.4e} mse={mse_db:.4e}")
def check_err(x, relerr):
tol = 1e-3 if x.dtype == torch.float16 else 5e-6
return relerr < tol
return [
check_err(x, re)
for x, re in zip([z, mu, rs, dx, dg, db], [re_z, re_mu, re_rs, re_dx, re_dg, re_db])
]
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TestFastLayerNorm(unittest.TestCase):
# TODO(crcrpar): Try `torch.testing.assert_close` instead and migrate to it if it's working.
def assertAll(self, l):
if not all(l):
print(l)
for x in l:
self.assertTrue(x)
def test_all_configs(self):
hidden_sizes = [
768,
1024,
1536,
2048,
2304,
3072,
3840,
4096,
5120,
6144,
8192,
10240,
12288,
12800,
14336,
15360,
16384,
18432,
20480,
24576,
25600,
30720,
32768,
40960,
49152,
65536,
]
for h in hidden_sizes:
with self.subTest(f"hidden_size={h}"):
self.assertAll(_test_impl(256, 2, h, fp32, fp32))
self.assertAll(_test_impl(256, 2, h, fp16, fp16))
self.assertAll(_test_impl(256, 2, h, fp32, fp16))
self.assertAll(_test_impl(256, 2, h, bf16, bf16))
self.assertAll(_test_impl(256, 2, h, fp32, bf16))
def test_run_benchmark(self):
for (S, B, hidden_size, runs) in (
(512, 32, 768, 1000),
(512, 32, 1024, 1000),
(512, 8, 4096, 1000),
(512, 8, 5120, 1000),
(512, 8, 6144, 1000),
(256, 2, 20480, 500),
(256, 2, 25600, 500),
(256, 2, 40960, 250),
(256, 2, 65536, 250),
):
with self.subTest(f"(S, B, hidden_size)=({S}, {B}, {hidden_size})"):
benchmark_(S, B, hidden_size, fp16, fp16, runs)
def test_compat_with_autocast(self):
autocast_dtypes = (
(torch.half, torch.bfloat16) if torch.cuda.is_bf16_supported() else (torch.half,)
)
input_shape = (512, 32, 768)
layer_norm = FastLayerNorm(input_shape[-1]).cuda()
input = torch.randn(input_shape).cuda()
for dtype in autocast_dtypes:
layer_norm.zero_grad(set_to_none=True)
with self.subTest(f"autocast_dtype={dtype}"):
with torch.cuda.amp.autocast(enabled=True, dtype=dtype):
out = layer_norm(input)
self.assertEqual(dtype, out.dtype)
grad = torch.randn_like(out)
out.backward(grad)
self.assertEqual(torch.float32, layer_norm.weight.grad.dtype)
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/layer_norm/test_fast_layer_norm.py |
import os
import inspect
import torch
from torch.cuda.amp import GradScaler
from torch.testing._internal import common_utils
from apex.parallel.distributed import flat_dist_call
from apex.contrib.optimizers.distributed_fused_lamb import DistributedFusedLAMB
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
def get_init_weights_func():
@torch.no_grad()
def init_weights(m):
if isinstance(m, torch.nn.Linear):
m.weight.fill_(1.0)
return init_weights
class ModelFoo(torch.nn.Module):
def __init__(self):
super(ModelFoo, self).__init__()
self.linear = torch.nn.Linear(128, 128, bias = False)
self.loss = torch.nn.MSELoss()
def forward(self, input_tensor, gt):
y = self.linear(input_tensor)
loss = self.loss(y, gt)
return loss
# A test for the distributed fused LAMB optimizer: run several iterations and check that the loss decreases.
# There are two instances of the same test because the optimizer chooses its collective operations based on `world_size`:
# If torch.distributed.get_world_size() == torch.cuda.device_count() it uses only `all_gather`.
# If torch.distributed.get_world_size() < torch.cuda.device_count() it uses both `all_gather` and `reduce_scatter`.
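# For example (illustration only): on a machine with 8 visible GPUs, NcclDistributedFusedLAMB below
# runs with world_size == 8 (all_gather only), while NcclDistributedFusedLAMB_partial_ar runs with
# world_size == 7 (all_gather + reduce_scatter).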
class NcclDistributedFusedLAMB(NcclDistributedTestBase):
@property
def world_size(self) -> int:
return torch.cuda.device_count()
@common_utils.parametrize("no_copy", [False, True])
@common_utils.parametrize("opt_kwargs", [
dict(overlap_reductions=True, dwu_num_blocks=2, dwu_num_chunks=2,
fused_norm=False, fuse_scale=False, clip_after_ar=True,
full_ar=False),
dict(overlap_reductions=False, dwu_num_blocks=1, dwu_num_chunks=1,
fused_norm=True, fuse_scale=True, clip_after_ar=False),
])
def test_distributed_fused_lamb(self, no_copy, opt_kwargs):
if no_copy and 'no_copy' not in inspect.getfullargspec(torch.distributed.reduce_scatter).args:
self.skipTest("does not support no_copy")
if no_copy and 'no_copy' not in inspect.getfullargspec(torch.distributed.all_gather).args:
self.skipTest("does not support no_copy")
assert torch.distributed.is_initialized()
gpu_count = torch.distributed.get_world_size()
init_scale = 100
lr = torch.tensor(0.1).cuda()
grad_scaler = GradScaler(init_scale=init_scale, growth_interval=1000)
model = ModelFoo()
model = model.cuda().half()
model.apply(get_init_weights_func())
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if 'full_ar' not in opt_kwargs:
opt_kwargs['full_ar'] = gpu_count == torch.cuda.device_count()
# Aidyn-A: not sure which parameters are best for testing purposes,
# setting up whatever seems appropriate.
optimizer = DistributedFusedLAMB(
optimizer_grouped_parameters,
lr=0.1,
betas=(0.9, 0.9),
eps=1e-6,
max_grad_norm=1.0,
dwu_group_size=gpu_count,
dwu_num_rs_pg=1,
dwu_num_ar_pg=1,
dwu_num_ag_pg=1,
use_nvlamb=False,
set_param_views_to_flat_buffer=False,
e5m2_allgather=False,
**opt_kwargs
)
optimizer.set_global_scale(init_scale)
optimizer._reduce_scatter_no_copy = no_copy
optimizer._all_gather_no_copy = no_copy
flat_dist_call([param.data for param in model.parameters()], torch.distributed.broadcast, (0,) )
x = torch.randn(4096, 128, dtype=torch.float16).cuda()
y = torch.randn(4096, 128, dtype=torch.float16).cuda()
losses = []
for _ in range(10):
loss = model(x, y)
optimizer._lazy_init_stage1()
grad_scaler.scale(loss).backward()
optimizer._lazy_init_stage2()
optimizer._lr = lr
optimizer.complete_reductions()
optimizer.set_global_scale(grad_scaler._get_scale_async())
grad_scaler.step(optimizer)
grad_scaler.update()
optimizer.zero_grad(set_to_none=True)
losses.append(loss.item())
self.assertTrue(losses == sorted(losses, reverse=True))
common_utils.instantiate_parametrized_tests(NcclDistributedFusedLAMB)
class NcclDistributedFusedLAMB_partial_ar(NcclDistributedFusedLAMB):
@property
def world_size(self) -> int:
return max(torch.cuda.device_count()-1, 1)
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/optimizers/test_distributed_fused_lamb.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/optimizers/__init__.py |
|
from contextlib import contextmanager
import io
import unittest
import torch
from torch.testing._internal import common_utils
SKIP_TEST = None
try:
from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam
except ImportError as e:
SKIP_TEST = e
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
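# Toy model: forward computes y = sum_i (i + 1) * params[i] * x elementwise, so the
# reference (DDP + torch.optim) and DistributedFusedAdam runs are cheap to compare.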
class SimpleModel(torch.nn.Module):
def __init__(self, num_layers, size):
super().__init__()
self.params = torch.nn.ParameterList([
torch.nn.Parameter(torch.rand(1, size) + 1)
for _ in range(num_layers)
])
def forward(self, x):
y = 0
for i, param in enumerate(self.params):
y += (i+1) * param * x
return y
def make_models(
num_layers,
size,
adam_w_mode=True,
model_dtype=torch.float32,
optim_dtype=None,
grad_sync_dtype=None,
param_sync_dtype=None,
device='cuda',
overlap_communication=True,
contiguous_buffers=False,
store_params=False,
store_param_remainders=False,
):
# Construct models with same parameters
ref_model = SimpleModel(num_layers, size).to(dtype=model_dtype, device=device)
dist_model = SimpleModel(num_layers, size).to(dtype=model_dtype, device=device)
with torch.no_grad():
for ref_param, dist_param in zip(ref_model.parameters(),
dist_model.parameters()):
dist_param.copy_(ref_param)
# Initialize reference model with data-parallelism
rank = torch.distributed.get_rank()
ref_model = torch.nn.parallel.DistributedDataParallel(
ref_model,
device_ids=[rank] if device=='cuda' else None,
output_device=rank if device=='cuda' else None,
)
# Construct optimizers with same hyperparameters
if optim_dtype is None:
optim_dtype = model_dtype
optim_args = dict(lr=0.1, betas=(0.1,0.2), eps=0.25, weight_decay=0.1)
ref_optim_class = torch.optim.AdamW if adam_w_mode else torch.optim.Adam
ref_optim = ref_optim_class(
[
{'params': list(ref_model.parameters())[1::2], 'lr': 0.2},
{'params': list(ref_model.parameters())[0::2]},
],
**optim_args,
)
dist_optim = DistributedFusedAdam(
[
{'params': list(dist_model.parameters())[1::2], 'lr': 0.2},
{'params': list(dist_model.parameters())[0::2]},
],
adam_w_mode=adam_w_mode,
overlap_grad_sync=overlap_communication,
overlap_param_sync=overlap_communication,
bucket_cap_mb=71/(4*1024*1024),
dtype=optim_dtype,
grad_sync_dtype=grad_sync_dtype,
param_sync_dtype=param_sync_dtype,
contiguous_param_buffer=contiguous_buffers,
contiguous_grad_buffer=contiguous_buffers,
store_params=store_params,
store_param_remainders=store_param_remainders,
**optim_args,
)
return ref_model, ref_optim, dist_model, dist_optim
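# No-op context manager, used in place of DistributedFusedAdam.no_sync() when gradient
# synchronization should happen on every micro-batch.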
@contextmanager
def dummy_context():
try:
yield
finally:
pass
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TestDistributedFusedAdam(NcclDistributedTestBase):
seed = 1234
def test_matches_pytorch(
self,
rtol=None,
atol=None,
num_layers=11,
layer_size=7,
batch_size=3,
num_steps=3,
micro_batch_steps=3,
adam_w_mode=True,
overlap_communication=True,
use_nosync=True,
model_dtype=torch.float32,
optim_dtype=None,
grad_sync_dtype=None,
param_sync_dtype=None,
device='cuda',
contiguous_buffers=False,
store_params=False,
store_param_remainders=False,
):
torch.manual_seed(self.seed + self.rank)
# Identical models with data-parallel and ZeRO
ref_model, ref_optim, dist_model, dist_optim = make_models(
num_layers,
layer_size,
adam_w_mode=adam_w_mode,
model_dtype=model_dtype,
optim_dtype=optim_dtype,
grad_sync_dtype=grad_sync_dtype,
param_sync_dtype=param_sync_dtype,
device=device,
overlap_communication=overlap_communication,
contiguous_buffers=contiguous_buffers,
store_params=store_params,
store_param_remainders=store_param_remainders,
)
# Training loop
for step in range(num_steps):
# Reset gradients
ref_optim.zero_grad()
dist_optim.zero_grad()
# Forward and backward passes
for micro_step in range(micro_batch_steps):
# Synthetic data
x = torch.rand(batch_size, layer_size) - 0.5
dy = torch.rand_like(x) - 0.5
x = x.to(dtype=model_dtype, device=device)
dy = dy.to(dtype=model_dtype, device=device)
# Reference implementation
x_ref = x.detach().clone().requires_grad_(True)
y_ref = ref_model(x_ref)
y_ref.backward(dy)
# Distributed implementation
x_dist = x.detach().clone().requires_grad_(True)
y_dist = dist_model(x_dist)
backward_context = dummy_context
if use_nosync and micro_step < micro_batch_steps-1:
backward_context = dist_optim.no_sync
with backward_context():
y_dist.backward(dy)
# Check that data tensors match
torch.testing.assert_close(
y_dist, y_ref, rtol=rtol, atol=atol)
torch.testing.assert_close(
x_dist.grad, x_ref.grad, rtol=rtol, atol=atol)
# Optimization step
ref_optim.step()
dist_optim.step()
# Check that parameters match
for ref_param, dist_param in zip(ref_model.parameters(),
dist_model.parameters()):
torch.testing.assert_close(
dist_param, ref_param, rtol=rtol, atol=atol)
def test_matches_pytorch_l2_reg(self):
self.test_matches_pytorch(adam_w_mode=False)
def test_matches_pytorch_no_overlap(self):
self.test_matches_pytorch(
overlap_communication=False,
use_nosync=False,
)
def test_matches_pytorch_sync_every_step(self):
self.test_matches_pytorch(use_nosync=False)
def test_matches_pytorch_contiguous_buffers(self):
self.test_matches_pytorch(contiguous_buffers=True)
def test_matches_pytorch_fp64(self):
self.test_matches_pytorch(
rtol=1.3e-6,
atol=1e-5,
model_dtype=torch.float64,
optim_dtype=torch.float32,
)
def test_matches_pytorch_fp16(self):
self.test_matches_pytorch(
rtol=5e-3,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.float16,
optim_dtype=torch.float16,
)
def test_matches_pytorch_bf16(self):
self.test_matches_pytorch(
rtol=5e-2,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.bfloat16,
optim_dtype=torch.bfloat16,
)
def test_matches_pytorch_fp16_params(self):
self.test_matches_pytorch(
rtol=5e-3,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.float16,
optim_dtype=torch.float32,
param_sync_dtype=torch.float16,
store_params=True,
)
def test_matches_pytorch_bf16_grads(self):
self.test_matches_pytorch(
rtol=5e-2,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.float32,
optim_dtype=torch.float32,
grad_sync_dtype=torch.bfloat16,
)
def test_matches_pytorch_bf16_param_remainders(self):
self.test_matches_pytorch(
rtol=5e-2,
atol=1e-5,
micro_batch_steps=1,
model_dtype=torch.bfloat16,
optim_dtype=torch.float32,
param_sync_dtype=torch.bfloat16,
store_params=False,
store_param_remainders=True,
)
def test_raises_on_mismatch(self):
torch.manual_seed(self.seed + self.rank)
# Identical models with data-parallel and ZeRO
num_layers = 11
layer_size = 7
ref_model, ref_optim, dist_model, dist_optim = make_models(
num_layers,
layer_size,
)
# Only perform training step with distributed model
dist_optim.zero_grad()
x = torch.rand(3, layer_size) - 0.5
x = x.to(dtype=torch.float32, device='cuda')
dy = torch.rand_like(x) - 0.5
y = dist_model(x)
y.backward(dy)
dist_optim.step()
# Check that parameters do not match
for ref_param, dist_param in zip(ref_model.parameters(),
dist_model.parameters()):
self.assertRaises(
AssertionError,
torch.testing.assert_close,
dist_param, ref_param,
)
def test_clip_grad_norm(self):
torch.manual_seed(self.seed + self.rank)
# Identical models with data-parallel and ZeRO
ref_model, ref_optim, dist_model, dist_optim = make_models(1, 1)
# Training steps with pre-determined gradients
xs = [3, 1, 4, 1, 5, 9]
dys = [1, -1, 1, -1, 1, -1]
for x, dy in zip(xs, dys):
x = torch.tensor([[x]], dtype=torch.float32, device='cuda')
dy = torch.tensor([[dy]], dtype=torch.float32, device='cuda')
# Reference implementation
ref_optim.zero_grad()
y_ref = ref_model(x.detach())
y_ref.backward(dy.detach())
ref_grad_norm = torch.nn.utils.clip_grad_norm_(ref_model.parameters(), 3.5)
ref_optim.step()
# Distributed implementation
dist_optim.zero_grad()
y_dist = dist_model(x.detach())
y_dist.backward(dy.detach())
dist_grad_norm = dist_optim.clip_grad_norm(3.5)
dist_optim.step()
# Check that parameters match
torch.testing.assert_close(dist_grad_norm, ref_grad_norm)
for ref_param, dist_param in zip(ref_model.parameters(),
dist_model.parameters()):
torch.testing.assert_close(dist_param, ref_param)
def test_grad_scaler(self):
torch.manual_seed(self.seed + self.rank)
# Identical models with data-parallel and ZeRO
ref_model, ref_optim, dist_model, dist_optim = make_models(1, 1)
grad_scaler_args = dict(
init_scale=3.21,
growth_factor=1.23,
backoff_factor=0.876,
growth_interval=1,
)
ref_scaler = torch.cuda.amp.GradScaler(**grad_scaler_args)
dist_scaler = torch.cuda.amp.GradScaler(**grad_scaler_args)
# Training steps with pre-determined gradients
xs = [3, 1, 4, 1, 5, 9]
dys = [1, float('inf'), 1, 1, float('nan'), -1]
for x, dy in zip(xs, dys):
x = torch.tensor([[x]], dtype=torch.float32, device='cuda')
dy = torch.tensor([[dy]], dtype=torch.float32, device='cuda')
# Reference implementation
ref_optim.zero_grad()
y_ref = ref_model(x.detach())
ref_scaler.scale(y_ref).backward(dy.detach())
ref_scaler.step(ref_optim)
ref_scaler.update()
# Distributed implementation
dist_optim.zero_grad()
y_dist = dist_model(x.detach())
dist_scaler.scale(y_dist).backward(dy.detach())
dist_scaler.step(dist_optim)
dist_scaler.update()
# Check that parameters match
for ref_param, dist_param in zip(ref_model.parameters(),
dist_model.parameters()):
torch.testing.assert_close(dist_param, ref_param)
def test_checkpoint(self):
# Construct two models with same config and different params
num_layers = 5
layer_size = 2
torch.manual_seed(self.seed + self.rank)
_, _, model_save, optim_save = make_models(num_layers, layer_size)
_, _, model_load, optim_load = make_models(num_layers, layer_size)
# Train one of the models
num_steps = 3
micro_batch_steps = 2
batch_size = 4
for step in range(num_steps):
optim_save.zero_grad()
for micro_step in range(micro_batch_steps):
x = torch.rand(batch_size, layer_size) - 0.5
dy = torch.rand_like(x) - 0.5
x = x.cuda()
dy = dy.cuda()
y = model_save(x)
y.backward(dy)
optim_save.step()
# Make sure models are different
for param_save, param_load in zip(model_save.parameters(),
model_load.parameters()):
self.assertRaises(
AssertionError,
torch.testing.assert_close,
param_load, param_save,
)
# Save state on root rank and load on all ranks
state_dict = {
'model': model_save.state_dict(),
'optim': optim_save.state_dict(),
}
if self.rank == 0:
state_bytes = io.BytesIO()
torch.save(state_dict, state_bytes)
state_bytes = [state_bytes.getvalue()]
else:
state_bytes = [None]
torch.distributed.broadcast_object_list(state_bytes, src=0)
state_bytes = io.BytesIO(state_bytes[0])
state_dict = torch.load(state_bytes)
model_load.load_state_dict(state_dict['model'])
optim_load.load_state_dict(state_dict['optim'])
# Make sure models are identical
for param_save, param_load in zip(model_save.parameters(),
model_load.parameters()):
torch.testing.assert_close(param_load, param_save)
# Train both models
num_steps = 3
micro_batch_steps = 3
batch_size = 5
for step in range(num_steps):
# Reset gradients
optim_save.zero_grad()
optim_load.zero_grad()
# Forward and backward passes
for micro_step in range(micro_batch_steps):
# Synthetic data
x = torch.rand(batch_size, layer_size) - 0.5
dy = torch.rand_like(x) - 0.5
x = x.cuda()
dy = dy.cuda()
# Forward and backward pass
x_save = x.detach().clone().requires_grad_(True)
y_save = model_save(x_save)
y_save.backward(dy)
x_load = x.detach().clone().requires_grad_(True)
y_load = model_load(x_load)
y_load.backward(dy)
# Check that data tensors match
torch.testing.assert_close(y_load, y_save)
torch.testing.assert_close(x_load.grad, x_save.grad)
# Optimizer step
optim_save.step()
optim_load.step()
# Check that parameters match
for param_save, param_load in zip(model_save.parameters(),
model_load.parameters()):
torch.testing.assert_close(param_load, param_save)
if __name__ == "__main__":
# Assume script has been run with torchrun
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/optimizers/test_dist_adam.py |
import unittest
import torch
from torch.testing._internal import common_utils
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
SKIP_TEST = None
try:
from apex.contrib.bottleneck import Bottleneck, SpatialBottleneck
from apex.contrib.bottleneck import HaloExchangerPeer
from apex.contrib.peer_memory import PeerMemoryPool
except ImportError as e:
SKIP_TEST = e
def ground_truth_bottleneck(C, dtype, explicit_nhwc):
bottleneck = Bottleneck(C, C, C, use_cudnn=True, explicit_nhwc=explicit_nhwc)
bottleneck.to(dtype=dtype, device="cuda")
for p in bottleneck.parameters():
torch.distributed.broadcast(p, 0)
for b in bottleneck.buffers():
torch.distributed.broadcast(b, 0)
return bottleneck
def print_bottleneck_p_and_b(bottleneck):
with torch.no_grad():
for n, p in bottleneck.named_parameters():
print("%s :: %s" % (n, str(p.norm(p=2, dtype=torch.float32))))
for n, p in bottleneck.named_buffers():
print("%s :: %s" % (n, str(p.norm(p=2, dtype=torch.float32))))
def has_nan(x):
if isinstance(x, list) or isinstance(x, tuple):
for xx in x:
if torch.any(torch.isnan(xx)):
return True
return False
elif isinstance(x, dict):
for k, v in x.items():
if torch.any(torch.isnan(v)):
return True
return False
else:
return torch.any(torch.isnan(x))
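# Symmetric relative error between two tensors: ||a - b||_2 / ||a + b||_2, computed in fp32.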
def rel_diff_t(xx1, xx2):
return ((xx1 - xx2).norm(p=2, dtype=torch.float32) / (xx1 + xx2).norm(p=2, dtype=torch.float32)).item()
def rel_diff(x1, x2):
if isinstance(x1, list) or isinstance(x1, tuple):
return [rel_diff_t(xx1, xx2) for xx1, xx2 in zip(x1, x2)]
elif isinstance(x1, dict):
return [rel_diff_t(xx1, xx2) for (k1, xx1), (k2, xx2) in zip(x1.items(), x2.items())]
else:
return rel_diff_t(x1, x2)
def graph_it(bottleneck, x):
print("Graphing")
with torch.no_grad():
x = x.clone()
x.grad = None
x.requires_grad = True
return torch.cuda.make_graphed_callables(bottleneck, (x,))
def clone_inputs(bottleneck, x, dy=None):
with torch.no_grad():
x = x.clone()
x.grad = None
x.requires_grad = True
if dy is None:
y = bottleneck(x)
dy = torch.randn_like(y) / 1e2
torch.distributed.broadcast(dy, 0)
return x, dy
def fprop_and_bprop(bottleneck, x, dy):
y = bottleneck(x)
y.backward(dy)
dgrad = x.grad.detach()
wgrad = {}
for n, p in bottleneck.named_parameters():
wgrad[n] = p.grad.detach()
return x, y, dy, dgrad, wgrad
def ground_truth(N, C, H, W, dtype, memory_format, bottleneck):
if memory_format == 1:
# 1 -> explicit nhwc
explicit_nhwc = True
with torch.no_grad():
x = torch.randn([N, H, W, C], dtype=dtype, device="cuda")
torch.distributed.broadcast(x, 0)
x, dy = clone_inputs(bottleneck, x)
return fprop_and_bprop(bottleneck, x, dy)
else:
# 2 -> native nhwc
# 3 -> nchw
explicit_nhwc = False
assert False, "Not implemented yet"
def print_ground_truth(gt):
x, y, dy, dgrad, wgrad = gt
if has_nan(y) or has_nan(dgrad) or has_nan(wgrad):
print("Error! Ground truth has NAN")
else:
print("Ok! No NAN found in ground truth")
def apply_to_different_bottleneck(gt, bottleneck):
with torch.no_grad():
x, _, dy, _, _ = gt
x, dy = clone_inputs(bottleneck, x, dy)
return fprop_and_bprop(bottleneck, x, dy)
def compare_single_field(results, f1, f2, l0, l1, l2):
if has_nan(f1) and has_nan(f2):
results[l0] = "both NAN"
elif has_nan(f1):
results[l0] = "%s.%s NAN" % (l1, l0)
elif has_nan(f2):
results[l0] = "%s.%s NAN" % (l2, l0)
else:
results[l0] = "%s" % (str(rel_diff(f1, f2)))
def compare(gt, bt):
x1, y1, dy1, dgrad1, wgrad1 = gt
x2, y2, dy2, dgrad2, wgrad2 = bt
results = {}
compare_single_field(results, y1, y2, "y", "gt", "bt")
compare_single_field(results, dy1, dy2, "dy", "gt", "bt")
compare_single_field(results, dgrad1, dgrad2, "dgrad", "gt", "bt")
compare_single_field(results, wgrad1, wgrad2, "wgrad", "gt", "bt")
for i in range(torch.distributed.get_world_size()):
if i == torch.distributed.get_rank():
print(i, results)
torch.distributed.barrier()
def spatial_parallel_bottleneck(C, dtype, explicit_nhwc, gt_bottleneck, spatial_parallel_args):
spatial_bottleneck = SpatialBottleneck(
C,
C,
C,
use_cudnn=True,
explicit_nhwc=explicit_nhwc,
spatial_parallel_args=spatial_parallel_args,
)
spatial_bottleneck.to(dtype=dtype, device="cuda")
with torch.no_grad():
sp = {}
for n, p in spatial_bottleneck.named_parameters():
sp[n] = p
for n, p in gt_bottleneck.named_parameters():
sp[n].copy_(p)
sb = {}
for n, b in spatial_bottleneck.named_buffers():
sb[n] = b
for n, b in gt_bottleneck.named_buffers():
sb[n].copy_(b)
return spatial_bottleneck
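# Runs the spatial-parallel bottleneck: each rank processes an H/world_size slice of the input,
# weight gradients are all-reduced, and outputs/dgrads are all-gathered along H so the result
# can be compared against the single-GPU ground truth.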
def n_way_spatial(halex, gt_bottleneck, gt, explicit_nhwc, world_size, rank, fp32_reduce=False):
assert explicit_nhwc, "Only tested for explicit nhwc"
x, _, dy, _, _ = gt
N, H, W, C = list(x.shape) # Tensor is already shaped properly for n-way parallel
dtype = x.dtype
spatial_group_size = world_size
spatial_group_rank = rank
spatial_communicator = None
spatial_halo_exchanger = halex
spatial_method = 1 # 1 -> overlap halo and main conv, 2 -> wait for halo, conv on padded x
use_delay_kernel = False
spatial_parallel_args = (
spatial_group_size,
spatial_group_rank,
spatial_communicator,
spatial_halo_exchanger,
spatial_method,
use_delay_kernel,
)
spatial_bottleneck = spatial_parallel_bottleneck(C, dtype, explicit_nhwc, gt_bottleneck, spatial_parallel_args)
with torch.no_grad():
Hs = H // spatial_group_size
xs = x[:, spatial_group_rank * Hs : (spatial_group_rank + 1) * Hs, :, :].clone()
dys = dy[:, spatial_group_rank * Hs : (spatial_group_rank + 1) * Hs, :, :].clone()
xs.requires_grad = True
spatial_bottleneck = graph_it(spatial_bottleneck, xs)
_, y, _, dgrad, wgrad = fprop_and_bprop(spatial_bottleneck, xs, dys)
# gather output pieces
for n, p in wgrad.items():
if fp32_reduce:
p32 = p.float()
torch.distributed.all_reduce(p32)
p.copy_(p32.half())
else:
torch.distributed.all_reduce(p)
ys = [torch.empty_like(y) for _ in range(spatial_group_size)]
torch.distributed.all_gather(ys, y)
y = torch.cat(ys, dim=1)
dgrads = [torch.empty_like(dgrad) for _ in range(spatial_group_size)]
torch.distributed.all_gather(dgrads, dgrad)
dgrad = torch.cat(dgrads, dim=1)
return x, y, dy, dgrad, wgrad
def main():
torch.use_deterministic_algorithms(True)
torch.distributed.init_process_group("nccl")
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
torch.cuda.set_device(rank)
explicit_nhwc = True
dtype = torch.float16
N, C, H, W = 1, 64, 200, 336
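# Round the per-rank slice height Hs up to a multiple of 8, then set H = Hs * world_size
# so the input splits evenly across ranks.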
Hs = ((H + 8 * world_size - 1) // (8 * world_size)) * 8
H = Hs * world_size
gt_bottleneck = ground_truth_bottleneck(C, dtype, explicit_nhwc)
gt = ground_truth(N, C, H, W, dtype, 1, gt_bottleneck)
# verify that spatial bottleneck with group_size 1 produces same results as ground truth bottleneck
spatial_bottleneck = spatial_parallel_bottleneck(C, dtype, explicit_nhwc, gt_bottleneck, None)
bt = apply_to_different_bottleneck(gt, spatial_bottleneck)
compare(gt, bt)
# print_bottleneck_p_and_b(gt_bottleneck)
# print_bottleneck_p_and_b(spatial_bottleneck)
group_size = world_size
group = rank // group_size
ranks = [group * group_size + i for i in range(group_size)]
rank_in_group = rank % group_size
spatial_group_size = world_size
spatial_communicator = None
peer_pool = PeerMemoryPool(0, 64 * 1024 * 1024, ranks)
# class HaloExchangerNoComm(HaloExchanger):
# def __init__(self, ranks, rank_in_group):
# class HaloExchangerAllGather(HaloExchanger):
# def __init__(self, ranks, rank_in_group, comm):
# class HaloExchangerSendRecv(HaloExchanger):
# def __init__(self, ranks, rank_in_group):
# class HaloExchangerPeer(HaloExchanger):
# def __init__(self, ranks, rank_in_group, peer_pool, explicit_nhwc, numSM=1):
# halex = HaloExchangerAllGather(ranks, rank_in_group)
# halex = HaloExchangerSendRecv(ranks, rank_in_group)
halex = HaloExchangerPeer(ranks, rank_in_group, peer_pool, explicit_nhwc, numSM=0)
# print("halex.signals = %s" % (str(halex.signals)))
# Make sure peer memory halo exchanger has finished initializing flags on all ranks before proceeding
# torch.cuda.synchronize()
# torch.distributed.barrier()
bt2 = n_way_spatial(halex, gt_bottleneck, gt, explicit_nhwc, world_size, rank, fp32_reduce=True)
compare(gt, bt2)
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TestBottleneck(NcclDistributedTestBase):
# PyTorch's float16 tolerance values, see https://pytorch.org/docs/stable/testing.html#torch.testing.assert_close
fp16_tolerance = {"atol": 1e-5, "rtol": 1e-3}
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 2)
def test_bottleneck_without_peer_memory(self) -> None:
explicit_nhwc: bool = True
dtype: torch.dtype = torch.float16
N, C, H, W = 1, 64, 200, 336
Hs = ((H + 8 * self.world_size - 1) // (8 * self.world_size)) * 8
H = Hs * self.world_size
gt_bottleneck = ground_truth_bottleneck(C, dtype, explicit_nhwc)
gt = ground_truth(N, C, H, W, dtype, 1, gt_bottleneck)
spatial_bottleneck = spatial_parallel_bottleneck(C, dtype, explicit_nhwc, gt_bottleneck, None)
bt = apply_to_different_bottleneck(gt, spatial_bottleneck)
self.assertEqual(gt, bt, **self.fp16_tolerance)
def test_bottleneck_with_peer_memory(self) -> None:
explicit_nhwc: bool = True
dtype: torch.dtype = torch.float16
N, C, H, W = 1, 64, 200, 336
Hs = ((H + 8 * self.world_size - 1) // (8 * self.world_size)) * 8
H = Hs * self.world_size
gt_bottleneck = ground_truth_bottleneck(C, dtype, explicit_nhwc)
gt = ground_truth(N, C, H, W, dtype, 1, gt_bottleneck)
group = self.rank // self.world_size
ranks = [group * self.world_size + i for i in range(self.world_size)]
rank_in_group = self.rank % self.world_size
spatial_group_size, spatial_communicator = self.world_size, None
peer_pool = PeerMemoryPool(0, 64 * 1024 * 1024, ranks)
halo_exchanger_peer = HaloExchangerPeer(ranks, rank_in_group, peer_pool, explicit_nhwc, numSM=0)
bt2 = n_way_spatial(
halo_exchanger_peer, gt_bottleneck, gt, explicit_nhwc, self.world_size, self.rank, fp32_reduce=True
)
# TODO(crcrpar): Investigate the implementation to mitigate the numerical errors.
# NOTE(crcrpar): This assert often fails due to numerical errors.
# self.assertEqual(gt, bt2, **self.fp16_tolerance)
if __name__ == "__main__":
common_utils.run_tests()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/bottleneck/test_bottleneck_module.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/bottleneck/__init__.py |
|
GeneSplice-main | GeneSplice/apex/apex/contrib/test/conv_bias_relu/__init__.py |
|
import copy
import math
import random
import unittest
import torch
import torch.nn.functional as F
HAS_CONV_BIAS_RELU = None
try:
from apex.contrib.conv_bias_relu import ConvBiasReLU, ConvBias, ConvBiasMaskReLU, ConvFrozenScaleBiasReLU
except ImportError as e:
HAS_CONV_BIAS_RELU = False
else:
HAS_CONV_BIAS_RELU = True
@unittest.skipIf(not HAS_CONV_BIAS_RELU, "`apex.contrib.conv_bias_relu` is not found.")
class ConvBiasReLUTest(unittest.TestCase):
def setUp(self, seed=0):
super().setUp()
torch.manual_seed(seed)
self.batch_size = random.randint(1, 64)
self.in_channels = random.randint(1, 64) * 8
self.out_channels = random.randint(1, 64) * 8
self.in_height = self.in_width = random.randint(5, 100)
self.conv_kernel_size = random.randint(1, 5)
self.conv_pad = random.randint(0, int(self.conv_kernel_size / 2))
self.conv_stride = random.randint(1, 5)
self.conv_dilation = 1
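# Standard conv output size: floor((H + 2*pad - dilation*(k - 1) - 1) / stride + 1)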
self.out_height = self.out_width = \
math.floor((self.in_height + 2 * self.conv_pad - \
self.conv_dilation * (self.conv_kernel_size - 1) - 1) / self.conv_stride + 1)
self.x = torch.randint(low=-16, high=16,
size=[self.batch_size, self.in_channels, self.in_height, self.in_width]) \
.cuda().to(memory_format=torch.channels_last).float()
self.x_ = self.x.clone()
self.x.requires_grad_()
self.x_.requires_grad_()
self.mask = torch.randn([self.batch_size, self.out_channels, self.out_height, self.out_width]).cuda().to(memory_format=torch.channels_last)
self.mask = (self.mask > 0).to(torch.int8)
self.mask_ = self.mask.clone()
self.scale = torch.randn([1, self.out_channels, 1, 1]).half().cuda()
self.scale_ = self.scale.clone()
self.bias = torch.randn([1, self.out_channels, 1, 1]).half().cuda()
self.bias_ = self.bias.clone()
self.conv1 = torch.nn.Conv2d(self.in_channels, self.out_channels, self.conv_kernel_size,
stride=self.conv_stride, padding=self.conv_pad).cuda().to(memory_format=torch.channels_last)
self.conv1_ = copy.deepcopy(self.conv1)
self.conv2 = torch.nn.Conv2d(self.in_channels, self.out_channels, self.conv_kernel_size,
stride=self.conv_stride, padding=self.conv_pad, bias=False).cuda().to(memory_format=torch.channels_last)
self.conv2_ = copy.deepcopy(self.conv2)
print()
print('> input=[{}, {}, {}, {}]'.format(self.batch_size, self.in_channels, self.in_height, self.in_width))
print('> kernel=[{}, {}, {}, {}], stride={}, pad={}'.format(self.out_channels, self.in_channels,
self.conv_kernel_size, self.conv_kernel_size,
self.conv_stride, self.conv_pad))
def test_conv_bias_relu(self):
with torch.cuda.amp.autocast(dtype=torch.half):
out = ConvBiasReLU(self.x, self.conv1.weight, self.conv1.bias.reshape(1, -1, 1, 1), self.conv_pad, self.conv_stride)
loss = (out.float()**2).sum() / out.numel()
loss.backward()
with torch.cuda.amp.autocast(dtype=torch.half):
out_ = F.relu(self.conv1_(self.x_))
loss_ = (out_**2).sum() / out_.numel()
loss_.backward()
torch.testing.assert_close(out_, out, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.bias.grad, self.conv1.bias.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.weight.grad, self.conv1.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.x_.grad, self.x.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
def test_conv_bias(self):
with torch.cuda.amp.autocast(dtype=torch.half):
out = ConvBias(self.x, self.conv1.weight, self.conv1.bias.reshape(1, -1, 1, 1), self.conv_pad, self.conv_stride)
loss = (out.float()**2).sum() / out.numel()
loss.backward()
with torch.cuda.amp.autocast(dtype=torch.half):
out_ = self.conv1_(self.x_)
loss_ = (out_**2).sum() / out_.numel()
loss_.backward()
torch.testing.assert_close(out, out_, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.bias.grad, self.conv1.bias.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.weight.grad, self.conv1.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.x_.grad, self.x.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
def test_conv_bias_mask_relu(self):
with torch.cuda.amp.autocast(dtype=torch.half):
out = ConvBiasMaskReLU(self.x, self.conv1.weight, self.conv1.bias.reshape(1, -1, 1, 1), self.mask, self.conv_pad, self.conv_stride)
loss = (out.float()**2).sum() / out.numel()
loss.backward()
with torch.cuda.amp.autocast(dtype=torch.half):
out_ = F.relu(self.conv1_(self.x_) * self.mask_)
loss_ = (out_**2).sum() / out_.numel()
loss_.backward()
torch.testing.assert_close(out, out_, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.bias.grad, self.conv1.bias.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.conv1_.weight.grad, self.conv1.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.x_.grad, self.x.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
def test_conv_frozen_scale_bias_relu(self):
with torch.cuda.amp.autocast(dtype=torch.half):
out = ConvFrozenScaleBiasReLU(self.x, self.conv2.weight, self.scale, self.bias, self.conv_pad, self.conv_stride)
loss = (out.float()**2).sum() / out.numel()
loss.backward()
with torch.cuda.amp.autocast(dtype=torch.half):
out_ = F.relu(self.conv2_(self.x_) * self.scale_ + self.bias_)
loss_ = (out_**2).sum() / out_.numel()
loss_.backward()
torch.testing.assert_close(out, out_, atol=2.5e-3, rtol=2.5e-3, equal_nan=True)
torch.testing.assert_close(self.conv2_.weight.grad, self.conv2.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
torch.testing.assert_close(self.x_.grad, self.x.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
if __name__ == '__main__':
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/conv_bias_relu/test_conv_bias_relu.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import SelfMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class SelfMultiheadAttnNormAddTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = SelfMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=True, impl="default"
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = SelfMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=True, impl="fast"
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_self_multihead_attn_norm_add(self):
grads = torch.randn_like(self.tst_inputs)
for _ in range(0, 5):
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
self.ref_inputs.backward(grads)
self.tst_inputs.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/multihead_attn/test_self_multihead_attn_norm_add.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/multihead_attn/__init__.py |
|
import unittest
import torch
import torch.nn.functional as F
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import fast_mask_softmax_dropout_func
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class FusedSoftmaxTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.mask = (torch.randn(self.sequences, self.seq_length) > 0).cuda()
self.mask = self.mask.half() * -10000
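# Additive attention mask: masked positions carry a large negative value so their
# post-softmax probability is effectively zero.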
self.ref_inputs = torch.randn(
self.heads * self.sequences,
self.seq_length,
self.seq_length,
dtype=torch.float16,
device=torch.device("cuda"),
).requires_grad_(True)
self.tst_inputs = self.ref_inputs.clone().detach().requires_grad_(True)
def test_fused_softmax(self):
grads = torch.randn_like(self.tst_inputs)
y_ref = self.ref_inputs.view(self.sequences, self.heads, self.seq_length, self.seq_length)
y_ref = y_ref + self.mask.unsqueeze(1).unsqueeze(2)
y_ref = y_ref.view(self.sequences * self.heads, self.seq_length, self.seq_length)
y_ref = F.softmax(y_ref, dim=-1)
y_ref = torch._fused_dropout(y_ref, 1.0)
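# torch._fused_dropout returns an (output, mask) tuple and takes the keep probability,
# so p=1.0 is an identity pass; y_ref[0] below is just the softmax output.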
y_tst = fast_mask_softmax_dropout_func(True, self.heads, self.tst_inputs, self.mask, True, 0.0)
y_ref[0].backward(grads)
y_tst.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(y_ref[0], y_tst, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/multihead_attn/test_mha_fused_softmax.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import EncdecMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class EncdecMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = EncdecMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=False, impl="default"
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs_q = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
self.ref_inputs_k = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = EncdecMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=False, impl="fast"
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs_q = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
self.tst_inputs_k = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_encdec_multihead_attn(self):
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs_q,
self.ref_inputs_k,
self.ref_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs_q,
self.tst_inputs_k,
self.tst_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
torch.testing.assert_close(self.ref_inputs_q, self.tst_inputs_q, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(self.ref_inputs_k, self.tst_inputs_k, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
with torch.no_grad():
ref_grads = torch.randn_like(ref_outputs)
tst_grads = ref_grads.clone()
ref_outputs.backward(ref_grads)
tst_outputs.backward(tst_grads)
torch.testing.assert_close(self.ref_inputs_q.grad, self.tst_inputs_q.grad, atol=1e-3, rtol=1e-3)
def test_encdec_multihead_attn_time_mask(self):
grads = torch.randn_like(self.tst_inputs_q)
time_mask_byte = torch.triu(
torch.ones(
self.tst_inputs_q.size(0), self.tst_inputs_k.size(0), device=torch.device("cuda"), dtype=torch.uint8
),
1,
)
time_mask_bool = time_mask_byte.to(torch.bool)
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs_q,
self.ref_inputs_k,
self.ref_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=time_mask_bool,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs_q,
self.tst_inputs_k,
self.tst_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=time_mask_byte,
is_training=True,
)
self.ref_inputs_q.backward(grads)
self.tst_inputs_q.backward(grads)
torch.testing.assert_close(self.ref_inputs_q, self.tst_inputs_q, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(self.ref_inputs_k, self.tst_inputs_k, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs_q.grad, self.tst_inputs_q.grad, atol=1e-3, rtol=1e-3)
def test_encdec_multihead_attn_pad_mask(self):
grads = torch.randn_like(self.tst_inputs_q)
pad_mask_byte = torch.tril(
torch.ones(
self.tst_inputs_k.size(1), self.tst_inputs_k.size(0), device=torch.device("cuda"), dtype=torch.uint8
),
1,
)
pad_mask_bool = pad_mask_byte.to(torch.bool)
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs_q,
self.ref_inputs_k,
self.ref_inputs_k,
key_padding_mask=pad_mask_bool,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs_q,
self.tst_inputs_k,
self.tst_inputs_k,
key_padding_mask=pad_mask_byte,
need_weights=False,
attn_mask=None,
is_training=True,
)
self.ref_inputs_q.backward(grads)
self.tst_inputs_q.backward(grads)
torch.testing.assert_close(self.ref_inputs_q, self.tst_inputs_q, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(self.ref_inputs_k, self.tst_inputs_k, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs_q.grad, self.tst_inputs_q.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/multihead_attn/test_encdec_multihead_attn.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import SelfMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = SelfMultiheadAttn(
self.hidden_dim,
self.heads,
dropout=self.dropout_prob,
bias=True,
include_norm_add=False,
separate_qkv_params=True,
mask_additive=True,
impl="default",
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = SelfMultiheadAttn(
self.hidden_dim,
self.heads,
dropout=self.dropout_prob,
bias=True,
include_norm_add=False,
separate_qkv_params=True,
mask_additive=True,
impl="fast",
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_self_multihead_attn_additive_mask(self):
grads = torch.randn_like(self.tst_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True,
)
self.ref_inputs.backward(grads)
self.tst_inputs.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/multihead_attn/test_fast_self_multihead_attn_bias.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import EncdecMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class EncdecMultiheadAttnNormAddTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = EncdecMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=True, impl="default"
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs_q = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
self.ref_inputs_k = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = EncdecMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=True, impl="fast"
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs_q = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
self.tst_inputs_k = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_encdec_multihead_attn_norm_add(self):
grads = torch.randn_like(self.tst_inputs_q)
for _ in range(5):
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs_q,
self.ref_inputs_k,
self.ref_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs_q,
self.tst_inputs_k,
self.tst_inputs_k,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
self.ref_inputs_q.backward(grads)
self.tst_inputs_q.backward(grads)
torch.testing.assert_close(self.ref_inputs_q, self.tst_inputs_q, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(self.ref_inputs_k, self.tst_inputs_k, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs_q.grad, self.tst_inputs_q.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/multihead_attn/test_encdec_multihead_attn_norm_add.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.multihead_attn import SelfMultiheadAttn
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = SelfMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=False, impl="default"
)
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = SelfMultiheadAttn(
self.hidden_dim, self.heads, dropout=self.dropout_prob, bias=False, include_norm_add=False, impl="fast"
)
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs = torch.randn(
self.seq_length, self.sequences, self.hidden_dim, dtype=torch.float16, device=torch.device("cuda")
).requires_grad_(True)
def test_self_multihead_attn(self):
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=None,
is_training=True,
)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
with torch.no_grad():
ref_grads = torch.randn_like(self.tst_inputs)
tst_grads = ref_grads.clone()
ref_outputs.backward(ref_grads)
tst_outputs.backward(tst_grads)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
def test_self_multihead_attn_time_mask(self):
grads = torch.randn_like(self.tst_inputs)
time_mask_byte = torch.triu(
torch.ones(
self.tst_inputs.size(0), self.tst_inputs.size(0), device=torch.device("cuda"), dtype=torch.uint8
),
1,
)
time_mask_bool = time_mask_byte.to(torch.bool)
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=time_mask_bool,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=None,
need_weights=False,
attn_mask=time_mask_byte,
is_training=True,
)
self.ref_inputs.backward(grads)
self.tst_inputs.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
def test_self_multihead_attn_pad_mask(self):
grads = torch.randn_like(self.tst_inputs)
pad_mask_byte = torch.tril(
torch.ones(
self.tst_inputs.size(1), self.tst_inputs.size(0), device=torch.device("cuda"), dtype=torch.uint8
),
1,
)
pad_mask_bool = pad_mask_byte.to(torch.bool)
ref_outputs, _ = self.ref_layer.forward(
self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=pad_mask_bool,
need_weights=False,
attn_mask=None,
is_training=True,
)
tst_outputs, _ = self.tst_layer.forward(
self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=pad_mask_byte,
need_weights=False,
attn_mask=None,
is_training=True,
)
self.ref_inputs.backward(grads)
self.tst_inputs.backward(grads)
torch.testing.assert_close(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5)
torch.testing.assert_close(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/multihead_attn/test_self_multihead_attn.py |
import random
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.clip_grad import clip_grad_norm_
except ImportError as e:
SKIP_TEST = e
def make_params(
num_params,
sizes=[1,2,3,4,5],
num_dims=[1,2,3],
dtypes=[torch.float32],
devices=['cuda'],
make_copy=False,
):
"""Construct parameters with random configurations"""
# Construct parameters
params = []
for _ in range(num_params):
dims = [random.choice(sizes) for _ in range(random.choice(num_dims))]
dtype = random.choice(dtypes)
device = random.choice(devices)
p = torch.nn.Parameter(torch.randn(dims, dtype=dtype, device=device))
p.grad = torch.randn_like(p)
params.append(p)
# Copy parameters if needed
if make_copy:
params_copy = []
for p in params:
p_copy = p.clone().detach()
p_copy.grad = p.grad.clone().detach()
params_copy.append(p_copy)
return params, params_copy
else:
return params
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class ClipGradNormTest(unittest.TestCase):
def setUp(self, seed=1234):
super().setUp()
random.seed(seed)
torch.manual_seed(seed)
def test_matches_pytorch(
self,
num_params=41,
dtypes=[torch.float32, torch.float16, torch.float64],
devices=['cuda', 'cpu'],
max_norm=0.54321,
norm_type=2.0,
rtol=1e-3,
atol=1e-20,
):
"""Make sure PyTorch and Apex gradient clipping produce same results"""
# Construct identical sets of parameters
torch_params, apex_params = make_params(
num_params,
dtypes=dtypes,
devices=devices,
make_copy=True,
)
# Apply gradient clipping
torch_norm = torch.nn.utils.clip_grad_norm_(
torch_params,
max_norm,
norm_type=norm_type,
)
apex_norm = clip_grad_norm_(
apex_params,
max_norm,
norm_type=norm_type,
)
# Make sure PyTorch and Apex get same results
torch.testing.assert_close(
apex_norm, torch_norm,
rtol=rtol,
atol=atol,
check_dtype=False,
)
for torch_p, apex_p in zip(torch_params, apex_params):
torch.testing.assert_close(
apex_p, torch_p,
rtol=0,
atol=0,
) # Params should be unaffected
torch.testing.assert_close(
apex_p.grad, torch_p.grad,
rtol=rtol,
atol=atol,
)
def test_matches_pytorch_fp16(self):
self.test_matches_pytorch(num_params=11, dtypes=[torch.float16])
def test_matches_pytorch_fp32(self):
self.test_matches_pytorch(dtypes=[torch.float32], rtol=1e-6)
def test_matches_pytorch_fp64(self):
self.test_matches_pytorch(dtypes=[torch.float64], rtol=1e-15)
def test_matches_pytorch_cpu(self):
self.test_matches_pytorch(devices=['cpu'])
def test_matches_pytorch_infnorm(self):
self.test_matches_pytorch(norm_type=float('inf'))
def test_matches_pytorch_1norm(self):
self.test_matches_pytorch(norm_type=1.0)
def test_raises_on_mismatch(self):
# Construct different sets of parameters
torch_params, apex_params = make_params(7, make_copy=True)
with torch.no_grad():
torch_params[0].grad.view(-1)[0] = 1.23
apex_params[0].grad.view(-1)[0] = 3.21
# Apply gradient clipping
torch_norm = torch.nn.utils.clip_grad_norm_(
torch_params,
0.54321,
)
apex_norm = clip_grad_norm_(
apex_params,
0.54321,
)
# Make sure PyTorch and Apex get different results
self.assertRaises(
AssertionError,
torch.testing.assert_close,
apex_norm, torch_norm,
rtol=1e-3,
atol=1e-20,
check_dtype=False,
)
for torch_p, apex_p in zip(torch_params, apex_params):
self.assertRaises(
AssertionError,
torch.testing.assert_close,
apex_p.grad, torch_p.grad,
rtol=1e-3,
atol=1e-20,
)
def test_raises_on_nan(self):
params = make_params(5, num_dims=[1])
params[2].grad[-1] = float('NaN')
self.assertRaises(
RuntimeError, clip_grad_norm_, params, 1.0, error_if_nonfinite=True)
def test_raises_on_inf(self):
params = make_params(5, num_dims=[1])
params[2].grad[-1] = float('inf')
self.assertRaises(
RuntimeError, clip_grad_norm_, params, 1.0, error_if_nonfinite=True)
if __name__ == "__main__":
unittest.main()
| GeneSplice-main | GeneSplice/apex/apex/contrib/test/clip_grad/test_clip_grad.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/clip_grad/__init__.py |