# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .profiler import *
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
from deepspeed.accelerator import get_accelerator
Tensor = torch.Tensor
module_flop_count = []
module_mac_count = []
old_functions = {}
class FlopsProfiler(object):
"""Measures the latency, number of estimated floating-point operations and parameters of each module in a PyTorch model.
The flops-profiler profiles the forward pass of a PyTorch model and prints the model graph with the measured profile attached to each module. It shows how latency, flops and parameters are spent in the model and which modules or layers could be the bottleneck. It also outputs the names of the top k modules in terms of aggregated latency, flops, and parameters at depth l with k and l specified by the user. The output profile is computed for each batch of input.
The DeepSpeed flops profiler can be used with the DeepSpeed runtime or as a standalone package.
When using DeepSpeed for model training, the flops profiler can be configured in the deepspeed_config file and no user code change is required.
If using the profiler as a standalone package, one imports the flops_profiler package and uses the APIs.
Here is an example for usage in a typical training workflow:
.. code-block:: python
model = Model()
prof = FlopsProfiler(model)
for step, batch in enumerate(data_loader):
if step == profile_step:
prof.start_profile()
loss = model(batch)
if step == profile_step:
flops = prof.get_total_flops(as_string=True)
params = prof.get_total_params(as_string=True)
prof.print_model_profile(profile_step=profile_step)
prof.end_profile()
loss.backward()
optimizer.step()
To profile a trained model in inference, use the `get_model_profile` API.
Args:
model (torch.nn.Module): The PyTorch model to profile.
ds_engine (optional): the DeepSpeed engine, if the profiler is used with the DeepSpeed runtime; enables engine timers and parallelism sizes in the printed profile.
"""
def __init__(self, model, ds_engine=None):
self.model = model
self.ds_engine = ds_engine
self.started = False
self.func_patched = False
def start_profile(self, ignore_list=None):
"""Starts profiling.
Extra attributes are added recursively to all the modules and the profiled torch.nn.functionals are monkey patched.
Args:
ignore_list (list, optional): the list of modules to ignore while profiling. Defaults to None.
"""
self.reset_profile()
_patch_functionals()
_patch_tensor_methods()
def register_module_hooks(module, ignore_list):
if ignore_list and type(module) in ignore_list:
return
# if computing the flops of a module directly
if type(module) in MODULE_HOOK_MAPPING:
if not hasattr(module, "__flops_handle__"):
module.__flops_handle__ = module.register_forward_hook(MODULE_HOOK_MAPPING[type(module)])
return
# if computing the flops of the functionals in a module
def pre_hook(module, input):
module_flop_count.append([])
module_mac_count.append([])
if not hasattr(module, "__pre_hook_handle__"):
module.__pre_hook_handle__ = module.register_forward_pre_hook(pre_hook)
def post_hook(module, input, output):
if module_flop_count:
module.__flops__ += sum([elem[1] for elem in module_flop_count[-1]])
module_flop_count.pop()
module.__macs__ += sum([elem[1] for elem in module_mac_count[-1]])
module_mac_count.pop()
if not hasattr(module, "__post_hook_handle__"):
module.__post_hook_handle__ = module.register_forward_hook(post_hook)
def start_time_hook(module, input):
get_accelerator().synchronize()
module.__start_time__ = time.time()
if not hasattr(module, "__start_time_hook_handle"):
module.__start_time_hook_handle__ = module.register_forward_pre_hook(start_time_hook)
def end_time_hook(module, input, output):
get_accelerator().synchronize()
module.__duration__ += time.time() - module.__start_time__
if not hasattr(module, "__end_time_hook_handle__"):
module.__end_time_hook_handle__ = module.register_forward_hook(end_time_hook)
self.model.apply(partial(register_module_hooks, ignore_list=ignore_list))
self.started = True
self.func_patched = True
def stop_profile(self):
"""Stop profiling.
All torch.nn.functionals are restored to their originals.
"""
if self.started and self.func_patched:
_reload_functionals()
_reload_tensor_methods()
self.func_patched = False
def remove_profile_attrs(module):
if hasattr(module, "__pre_hook_handle__"):
module.__pre_hook_handle__.remove()
del module.__pre_hook_handle__
if hasattr(module, "__post_hook_handle__"):
module.__post_hook_handle__.remove()
del module.__post_hook_handle__
if hasattr(module, "__flops_handle__"):
module.__flops_handle__.remove()
del module.__flops_handle__
if hasattr(module, "__start_time_hook_handle__"):
module.__start_time_hook_handle__.remove()
del module.__start_time_hook_handle__
if hasattr(module, "__end_time_hook_handle__"):
module.__end_time_hook_handle__.remove()
del module.__end_time_hook_handle__
self.model.apply(remove_profile_attrs)
def reset_profile(self):
"""Resets the profiling.
Adds or resets the extra attributes.
"""
def add_or_reset_attrs(module):
module.__flops__ = 0
module.__macs__ = 0
module.__params__ = sum(p.numel() for p in module.parameters())
module.__start_time__ = 0
module.__duration__ = 0
self.model.apply(add_or_reset_attrs)
def end_profile(self):
"""Ends profiling.
The added attributes and handles are removed recursively on all the modules.
"""
if not self.started:
return
self.stop_profile()
self.started = False
def remove_profile_attrs(module):
if hasattr(module, "__flops__"):
del module.__flops__
if hasattr(module, "__macs__"):
del module.__macs__
if hasattr(module, "__params__"):
del module.__params__
if hasattr(module, "__start_time__"):
del module.__start_time__
if hasattr(module, "__duration__"):
del module.__duration__
self.model.apply(remove_profile_attrs)
def get_total_flops(self, as_string=False):
"""Returns the total flops of the model.
Args:
as_string (bool, optional): whether to output the flops as string. Defaults to False.
Returns:
The number of floating-point operations of the model forward pass.
"""
total_flops = get_module_flops(self.model)
return num_to_string(total_flops) if as_string else total_flops
def get_total_macs(self, as_string=False):
"""Returns the total MACs of the model.
Args:
as_string (bool, optional): whether to output the MACs as string. Defaults to False.
Returns:
The number of multiply-accumulate operations of the model forward pass.
"""
total_macs = get_module_macs(self.model)
return macs_to_string(total_macs) if as_string else total_macs
def get_total_duration(self, as_string=False):
"""Returns the total duration of the model forward pass.
Args:
as_string (bool, optional): whether to output the duration as string. Defaults to False.
Returns:
The latency of the model forward pass.
"""
total_duration = get_module_duration(self.model)
return duration_to_string(total_duration) if as_string else total_duration
def get_total_params(self, as_string=False):
"""Returns the total parameters of the model.
Args:
as_string (bool, optional): whether to output the parameters as string. Defaults to False.
Returns:
The number of parameters in the model.
"""
return params_to_string(self.model.__params__) if as_string else self.model.__params__
def print_model_profile(self, profile_step=1, module_depth=-1, top_modules=1, detailed=True, output_file=None):
"""Prints the model graph with the measured profile attached to each module.
Args:
profile_step (int, optional): The global training step at which to profile. Note that warm-up steps are needed for accurate time measurement.
module_depth (int, optional): The depth of the model to which to print the aggregated module information. When set to -1, it prints information from the top to the innermost modules (the maximum depth).
top_modules (int, optional): Limits the aggregated profile output to the number of top modules specified.
detailed (bool, optional): Whether to print the detailed model profile.
output_file (str, optional): Path to the output file. If None, the profiler prints to stdout.
"""
if not self.started:
return
import sys
import os.path
original_stdout = None
f = None
if output_file and output_file != "":
dir_path = os.path.dirname(os.path.abspath(output_file))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
original_stdout = sys.stdout
f = open(output_file, "w")
sys.stdout = f
total_flops = self.get_total_flops()
total_macs = self.get_total_macs()
total_duration = self.get_total_duration()
total_params = self.get_total_params()
self.flops = total_flops
self.macs = total_macs
self.params = total_params
print("\n-------------------------- DeepSpeed Flops Profiler --------------------------")
print(f'Profile Summary at step {profile_step}:')
print(
"Notations:\ndata parallel size (dp_size), model parallel size(mp_size),\nnumber of parameters (params), number of multiply-accumulate operations(MACs),\nnumber of floating-point operations (flops), floating-point operations per second (FLOPS),\nfwd latency (forward propagation latency), bwd latency (backward propagation latency),\nstep (weights update latency), iter latency (sum of fwd, bwd and step latency)\n"
)
if self.ds_engine:
print('{:<60} {:<8}'.format('world size: ', self.ds_engine.world_size))
print('{:<60} {:<8}'.format('data parallel size: ', self.ds_engine.dp_world_size))
print('{:<60} {:<8}'.format('model parallel size: ', self.ds_engine.mp_world_size))
print('{:<60} {:<8}'.format('batch size per GPU: ', self.ds_engine.train_micro_batch_size_per_gpu()))
print('{:<60} {:<8}'.format('params per gpu: ', params_to_string(total_params)))
print('{:<60} {:<8}'.format(
'params of model = params per GPU * mp_size: ',
params_to_string(total_params * ((self.ds_engine.mp_world_size) if self.ds_engine else 1))))
print('{:<60} {:<8}'.format('fwd MACs per GPU: ', macs_to_string(total_macs)))
print('{:<60} {:<8}'.format('fwd flops per GPU: ', num_to_string(total_flops)))
print('{:<60} {:<8}'.format(
'fwd flops of model = fwd flops per GPU * mp_size: ',
num_to_string(total_flops * ((self.ds_engine.mp_world_size) if self.ds_engine else 1))))
fwd_latency = self.get_total_duration()
if self.ds_engine and self.ds_engine.wall_clock_breakdown():
fwd_latency = self.ds_engine.timers('forward').elapsed(False) / 1000.0
print('{:<60} {:<8}'.format('fwd latency: ', duration_to_string(fwd_latency)))
print('{:<60} {:<8}'.format('fwd FLOPS per GPU = fwd flops per GPU / fwd latency: ',
flops_to_string(total_flops / fwd_latency)))
if self.ds_engine and self.ds_engine.wall_clock_breakdown():
bwd_latency = self.ds_engine.timers('backward').elapsed(False) / 1000.0
step_latency = self.ds_engine.timers('step').elapsed(False) / 1000.0
print('{:<60} {:<8}'.format('bwd latency: ', duration_to_string(bwd_latency)))
print('{:<60} {:<8}'.format('bwd FLOPS per GPU = 2 * fwd flops per GPU / bwd latency: ',
flops_to_string(2 * total_flops / bwd_latency)))
print('{:<60} {:<8}'.format('fwd+bwd FLOPS per GPU = 3 * fwd flops per GPU / (fwd+bwd latency): ',
flops_to_string(3 * total_flops / (fwd_latency + bwd_latency))))
print('{:<60} {:<8}'.format('step latency: ', duration_to_string(step_latency)))
iter_latency = fwd_latency + bwd_latency + step_latency
print('{:<60} {:<8}'.format('iter latency: ', duration_to_string(iter_latency)))
print('{:<60} {:<8}'.format('FLOPS per GPU = 3 * fwd flops per GPU / iter latency: ',
flops_to_string(3 * total_flops / iter_latency)))
samples_per_iter = self.ds_engine.train_micro_batch_size_per_gpu() * self.ds_engine.world_size
print('{:<60} {:<8.2f}'.format('samples/second: ', samples_per_iter / iter_latency))
def flops_repr(module):
params = module.__params__
flops = get_module_flops(module)
macs = get_module_macs(module)
items = [
params_to_string(params),
"{:.2%} Params".format(params / total_params if total_params else 0),
macs_to_string(macs),
"{:.2%} MACs".format(0.0 if total_macs == 0 else macs / total_macs),
]
duration = get_module_duration(module)
items.append(duration_to_string(duration))
items.append("{:.2%} latency".format(0.0 if total_duration == 0 else duration / total_duration))
items.append(flops_to_string(0.0 if duration == 0 else flops / duration))
items.append(module.original_extra_repr())
return ", ".join(items)
def add_extra_repr(module):
flops_extra_repr = flops_repr.__get__(module)
if module.extra_repr != flops_extra_repr:
module.original_extra_repr = module.extra_repr
module.extra_repr = flops_extra_repr
assert module.extra_repr != module.original_extra_repr
def del_extra_repr(module):
if hasattr(module, "original_extra_repr"):
module.extra_repr = module.original_extra_repr
del module.original_extra_repr
self.model.apply(add_extra_repr)
print("\n----------------------------- Aggregated Profile per GPU -----------------------------")
self.print_model_aggregated_profile(module_depth=module_depth, top_modules=top_modules)
if detailed:
print("\n------------------------------ Detailed Profile per GPU ------------------------------")
print(
"Each module profile is listed after its name in the following order: \nparams, percentage of total params, MACs, percentage of total MACs, fwd latency, percentage of total fwd latency, fwd FLOPS"
)
print(
"\nNote: 1. A module can have torch.nn.module or torch.nn.functional to compute logits (e.g. CrossEntropyLoss). They are not counted as submodules, thus not to be printed out. However they make up the difference between a parent's MACs (or latency) and the sum of its submodules'.\n2. Number of floating-point operations is a theoretical estimation, thus FLOPS computed using that could be larger than the maximum system throughput.\n3. The fwd latency listed in the top module's profile is directly captured at the module forward function in PyTorch, thus it's less than the fwd latency shown above which is captured in DeepSpeed.\n"
)
print(self.model)
self.model.apply(del_extra_repr)
print("------------------------------------------------------------------------------")
if output_file:
sys.stdout = original_stdout
f.close()
def print_model_aggregated_profile(self, module_depth=-1, top_modules=1):
"""Prints the names of the top top_modules modules in terms of aggregated time, flops, and parameters at depth module_depth.
Args:
module_depth (int, optional): the depth of the modules to show. Defaults to -1 (the innermost modules).
top_modules (int, optional): the number of top modules to show. Defaults to 1.
"""
info = {}
if not hasattr(self.model, "__flops__"):
print("no __flops__ attribute in the model, call this function after start_profile and before end_profile")
return
def walk_module(module, curr_depth, info):
if curr_depth not in info:
info[curr_depth] = {}
if module.__class__.__name__ not in info[curr_depth]:
info[curr_depth][module.__class__.__name__] = [
0,
0,
0,
] # macs, params, time
info[curr_depth][module.__class__.__name__][0] += get_module_macs(module)
info[curr_depth][module.__class__.__name__][1] += module.__params__
info[curr_depth][module.__class__.__name__][2] += get_module_duration(module)
has_children = len(module._modules.items()) != 0
if has_children:
for child in module.children():
walk_module(child, curr_depth + 1, info)
walk_module(self.model, 0, info)
depth = module_depth
if module_depth == -1:
depth = len(info) - 1
print(f'Top {top_modules} modules in terms of params, MACs or fwd latency at different model depths:')
for d in range(depth):
num_items = min(top_modules, len(info[d]))
sort_macs = {
k: macs_to_string(v[0])
for k, v in sorted(info[d].items(), key=lambda item: item[1][0], reverse=True)[:num_items]
}
sort_params = {
k: params_to_string(v[1])
for k, v in sorted(info[d].items(), key=lambda item: item[1][1], reverse=True)[:num_items]
}
sort_time = {
k: duration_to_string(v[2])
for k, v in sorted(info[d].items(), key=lambda item: item[1][2], reverse=True)[:num_items]
}
print(f"depth {d}:")
print(f" params - {sort_params}")
print(f" MACs - {sort_macs}")
print(f" fwd latency - {sort_time}")
def _prod(dims):
p = 1
for v in dims:
p *= v
return p
def _linear_flops_compute(input, weight, bias=None):
out_features = weight.shape[0]
macs = input.numel() * out_features
return 2 * macs, macs
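# Worked example (illustrative numbers, not from the source): for F.linear with an input of
# shape (8, 512) and a weight of shape (1024, 512), macs = input.numel() * out_features =
# 8*512*1024 = 4,194,304 and flops = 2 * macs = 8,388,608; the optional bias add is not
# counted separately here.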
def _relu_flops_compute(input, inplace=False):
return input.numel(), 0
def _prelu_flops_compute(input: Tensor, weight: Tensor):
return input.numel(), 0
def _elu_flops_compute(input: Tensor, alpha: float = 1.0, inplace: bool = False):
return input.numel(), 0
def _leaky_relu_flops_compute(input: Tensor, negative_slope: float = 0.01, inplace: bool = False):
return input.numel(), 0
def _relu6_flops_compute(input: Tensor, inplace: bool = False):
return input.numel(), 0
def _silu_flops_compute(input: Tensor, inplace: bool = False):
return input.numel(), 0
def _gelu_flops_compute(input, **kwargs):
return input.numel(), 0
def _pool_flops_compute(input,
kernel_size,
stride=None,
padding=0,
dilation=None,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
return_indices=None):
return input.numel(), 0
def _conv_flops_compute(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
assert weight.shape[1] * groups == input.shape[1]
batch_size = input.shape[0]
in_channels = input.shape[1]
out_channels = weight.shape[0]
kernel_dims = list(weight.shape[2:])
input_dims = list(input.shape[2:])
length = len(input_dims)
paddings = padding if type(padding) is tuple else (padding, ) * length
strides = stride if type(stride) is tuple else (stride, ) * length
dilations = dilation if type(dilation) is tuple else (dilation, ) * length
output_dims = []
for idx, input_dim in enumerate(input_dims):
output_dim = (input_dim + 2 * paddings[idx] - (dilations[idx] *
(kernel_dims[idx] - 1) + 1)) // strides[idx] + 1
output_dims.append(output_dim)
filters_per_channel = out_channels // groups
conv_per_position_macs = int(_prod(kernel_dims)) * in_channels * filters_per_channel
active_elements_count = batch_size * int(_prod(output_dims))
overall_conv_macs = conv_per_position_macs * active_elements_count
overall_conv_flops = 2 * overall_conv_macs
bias_flops = 0
if bias is not None:
bias_flops = out_channels * active_elements_count
return int(overall_conv_flops + bias_flops), int(overall_conv_macs)
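# Worked example (illustrative numbers, not from the source): a Conv2d with in_channels=3,
# out_channels=64, a 3x3 kernel, stride=1, padding=1, groups=1 on a single 224x224 image gives
# output_dims = [224, 224], conv_per_position_macs = 3*3*3*64 = 1728 and
# active_elements_count = 1*224*224 = 50176, hence 86,704,128 MACs (~173.4M flops),
# plus 64*50176 = 3,211,264 bias flops when bias is not None.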
def _conv_trans_flops_compute(
input,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
):
batch_size = input.shape[0]
in_channels = input.shape[1]
out_channels = weight.shape[0]
kernel_dims = list(weight.shape[2:])
input_dims = list(input.shape[2:])
length = len(input_dims)
paddings = padding if type(padding) is tuple else (padding, ) * length
strides = stride if type(stride) is tuple else (stride, ) * length
dilations = dilation if type(dilation) is tuple else (dilation, ) * length
output_dims = []
for idx, input_dim in enumerate(input_dims):
output_dim = (input_dim + 2 * paddings[idx] - (dilations[idx] *
(kernel_dims[idx] - 1) + 1)) // strides[idx] + 1
output_dims.append(output_dim)
paddings = padding if type(padding) is tuple else (padding, padding)
strides = stride if type(stride) is tuple else (stride, stride)
dilations = dilation if type(dilation) is tuple else (dilation, dilation)
filters_per_channel = out_channels // groups
conv_per_position_macs = int(_prod(kernel_dims)) * in_channels * filters_per_channel
active_elements_count = batch_size * int(_prod(input_dims))
overall_conv_macs = conv_per_position_macs * active_elements_count
overall_conv_flops = 2 * overall_conv_macs
bias_flops = 0
if bias is not None:
bias_flops = out_channels * batch_size * int(_prod(output_dims))
return int(overall_conv_flops + bias_flops), int(overall_conv_macs)
def _batch_norm_flops_compute(
input,
running_mean,
running_var,
weight=None,
bias=None,
training=False,
momentum=0.1,
eps=1e-05,
):
has_affine = weight is not None
if training:
# estimation
return input.numel() * (5 if has_affine else 4), 0
flops = input.numel() * (2 if has_affine else 1)
return flops, 0
def _layer_norm_flops_compute(
input: Tensor,
normalized_shape: List[int],
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5,
):
has_affine = weight is not None
# estimation
return input.numel() * (5 if has_affine else 4), 0
def _group_norm_flops_compute(input: Tensor,
num_groups: int,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5):
has_affine = weight is not None
# estimation
return input.numel() * (5 if has_affine else 4), 0
def _instance_norm_flops_compute(
input: Tensor,
running_mean: Optional[Tensor] = None,
running_var: Optional[Tensor] = None,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
use_input_stats: bool = True,
momentum: float = 0.1,
eps: float = 1e-5,
):
has_affine = weight is not None
# estimation
return input.numel() * (5 if has_affine else 4), 0
def _upsample_flops_compute(input, **kwargs):
size = kwargs.get('size', None)
if size is not None:
if isinstance(size, tuple) or isinstance(size, list):
return int(_prod(size)), 0
else:
return int(size), 0
scale_factor = kwargs.get('scale_factor', None)
assert scale_factor is not None, "either size or scale_factor should be defined"
flops = input.numel()
if isinstance(scale_factor, tuple) and len(scale_factor) == len(input):
flops *= int(_prod(scale_factor))
else:
flops *= scale_factor**len(input)
return flops, 0
def _softmax_flops_compute(input, dim=None, _stacklevel=3, dtype=None):
return input.numel(), 0
def _embedding_flops_compute(
input,
weight,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
):
return 0, 0
def _dropout_flops_compute(input, p=0.5, training=True, inplace=False):
return 0, 0
def _matmul_flops_compute(input, other, *, out=None):
"""
Count flops for the matmul operation.
"""
macs = _prod(input.shape) * other.shape[-1]
return 2 * macs, macs
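# Worked example (illustrative numbers, not from the source): for a batched matmul of an
# (8, 64, 128) tensor with a (128, 256) tensor, macs = _prod(input.shape) * other.shape[-1] =
# 8*64*128*256 = 16,777,216 and flops = 2 * macs = 33,554,432.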
def _addmm_flops_compute(input, mat1, mat2, *, beta=1, alpha=1, out=None):
"""
Count flops for the addmm operation.
"""
macs = _prod(mat1.shape) * mat2.shape[-1]
return 2 * macs + _prod(input.shape), macs
def _einsum_flops_compute(equation, *operands):
"""
Count flops for the einsum operation.
"""
equation = equation.replace(" ", "")
input_shapes = [o.shape for o in operands]
# Re-map equation so that same equation with different alphabet
# representations will look the same.
letter_order = OrderedDict((k, 0) for k in equation if k.isalpha()).keys()
mapping = {ord(x): 97 + i for i, x in enumerate(letter_order)}
equation = equation.translate(mapping)
np_arrs = [np.zeros(s) for s in input_shapes]
optim = np.einsum_path(equation, *np_arrs, optimize="optimal")[1]
for line in optim.split("\n"):
if "optimized flop" in line.lower():
flop = int(float(line.split(":")[-1]))
return flop, 0
raise NotImplementedError("Unsupported einsum operation.")
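# Example of what the parser above relies on (a hedged note): for equation "bij,bjk->bik" with
# operands of shapes (2, 3, 4) and (2, 4, 5), np.einsum_path returns a human-readable report as
# its second element containing an "Optimized FLOP count" line; that reported count is used as
# the flop estimate here, and MACs are not tracked for einsum.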
def _tensor_addmm_flops_compute(self, mat1, mat2, *, beta=1, alpha=1, out=None):
"""
Count flops for the tensor addmm operation.
"""
macs = _prod(mat1.shape) * mat2.shape[-1]
return 2 * macs + _prod(self.shape), macs
def _mul_flops_compute(input, other, *, out=None):
return _elementwise_flops_compute(input, other)
def _add_flops_compute(input, other, *, alpha=1, out=None):
return _elementwise_flops_compute(input, other)
def _elementwise_flops_compute(input, other):
if not torch.is_tensor(input):
if torch.is_tensor(other):
return _prod(other.shape), 0
else:
return 1, 0
elif not torch.is_tensor(other):
return _prod(input.shape), 0
else:
dim_input = len(input.shape)
dim_other = len(other.shape)
max_dim = max(dim_input, dim_other)
final_shape = []
for i in range(max_dim):
in_i = input.shape[i] if i < dim_input else 1
ot_i = other.shape[i] if i < dim_other else 1
if in_i > ot_i:
final_shape.append(in_i)
else:
final_shape.append(ot_i)
flops = _prod(final_shape)
return flops, 0
def wrapFunc(func, funcFlopCompute):
oldFunc = func
name = func.__str__
old_functions[name] = oldFunc
def newFunc(*args, **kwds):
flops, macs = funcFlopCompute(*args, **kwds)
if module_flop_count:
module_flop_count[-1].append((name, flops))
if module_mac_count and macs:
module_mac_count[-1].append((name, macs))
return oldFunc(*args, **kwds)
newFunc.__str__ = func.__str__
return newFunc
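# How counts reach individual modules (summary of the mechanism defined around this wrapper):
# start_profile() registers a forward pre-hook that pushes a fresh list onto
# module_flop_count/module_mac_count, the wrapped functional (newFunc) appends
# (name, flops)/(name, macs) entries to the innermost list, and the forward post-hook pops that
# list and adds its sums to module.__flops__ and module.__macs__. Each module therefore only
# accumulates the functionals called directly in its own forward; parent totals are recovered
# later by get_module_flops/get_module_macs, which recurse over children.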
def _patch_functionals():
# FC
F.linear = wrapFunc(F.linear, _linear_flops_compute)
# convolutions
F.conv1d = wrapFunc(F.conv1d, _conv_flops_compute)
F.conv2d = wrapFunc(F.conv2d, _conv_flops_compute)
F.conv3d = wrapFunc(F.conv3d, _conv_flops_compute)
# conv transposed
F.conv_transpose1d = wrapFunc(F.conv_transpose1d, _conv_trans_flops_compute)
F.conv_transpose2d = wrapFunc(F.conv_transpose2d, _conv_trans_flops_compute)
F.conv_transpose3d = wrapFunc(F.conv_transpose3d, _conv_trans_flops_compute)
# activations
F.relu = wrapFunc(F.relu, _relu_flops_compute)
F.prelu = wrapFunc(F.prelu, _prelu_flops_compute)
F.elu = wrapFunc(F.elu, _elu_flops_compute)
F.leaky_relu = wrapFunc(F.leaky_relu, _leaky_relu_flops_compute)
F.relu6 = wrapFunc(F.relu6, _relu6_flops_compute)
if hasattr(F, "silu"):
F.silu = wrapFunc(F.silu, _silu_flops_compute)
F.gelu = wrapFunc(F.gelu, _gelu_flops_compute)
# Normalizations
F.batch_norm = wrapFunc(F.batch_norm, _batch_norm_flops_compute)
F.layer_norm = wrapFunc(F.layer_norm, _layer_norm_flops_compute)
F.instance_norm = wrapFunc(F.instance_norm, _instance_norm_flops_compute)
F.group_norm = wrapFunc(F.group_norm, _group_norm_flops_compute)
# poolings
F.avg_pool1d = wrapFunc(F.avg_pool1d, _pool_flops_compute)
F.avg_pool2d = wrapFunc(F.avg_pool2d, _pool_flops_compute)
F.avg_pool3d = wrapFunc(F.avg_pool3d, _pool_flops_compute)
F.max_pool1d = wrapFunc(F.max_pool1d, _pool_flops_compute)
F.max_pool2d = wrapFunc(F.max_pool2d, _pool_flops_compute)
F.max_pool3d = wrapFunc(F.max_pool3d, _pool_flops_compute)
F.adaptive_avg_pool1d = wrapFunc(F.adaptive_avg_pool1d, _pool_flops_compute)
F.adaptive_avg_pool2d = wrapFunc(F.adaptive_avg_pool2d, _pool_flops_compute)
F.adaptive_avg_pool3d = wrapFunc(F.adaptive_avg_pool3d, _pool_flops_compute)
F.adaptive_max_pool1d = wrapFunc(F.adaptive_max_pool1d, _pool_flops_compute)
F.adaptive_max_pool2d = wrapFunc(F.adaptive_max_pool2d, _pool_flops_compute)
F.adaptive_max_pool3d = wrapFunc(F.adaptive_max_pool3d, _pool_flops_compute)
# upsample
F.upsample = wrapFunc(F.upsample, _upsample_flops_compute)
F.interpolate = wrapFunc(F.interpolate, _upsample_flops_compute)
# softmax
F.softmax = wrapFunc(F.softmax, _softmax_flops_compute)
# embedding
F.embedding = wrapFunc(F.embedding, _embedding_flops_compute)
def _patch_tensor_methods():
torch.matmul = wrapFunc(torch.matmul, _matmul_flops_compute)
torch.Tensor.matmul = wrapFunc(torch.Tensor.matmul, _matmul_flops_compute)
torch.mm = wrapFunc(torch.mm, _matmul_flops_compute)
torch.Tensor.mm = wrapFunc(torch.Tensor.mm, _matmul_flops_compute)
torch.bmm = wrapFunc(torch.bmm, _matmul_flops_compute)
torch.Tensor.bmm = wrapFunc(torch.Tensor.bmm, _matmul_flops_compute)
torch.addmm = wrapFunc(torch.addmm, _addmm_flops_compute)
torch.Tensor.addmm = wrapFunc(torch.Tensor.addmm, _tensor_addmm_flops_compute)
torch.mul = wrapFunc(torch.mul, _mul_flops_compute)
torch.Tensor.mul = wrapFunc(torch.Tensor.mul, _mul_flops_compute)
torch.add = wrapFunc(torch.add, _add_flops_compute)
torch.Tensor.add = wrapFunc(torch.Tensor.add, _add_flops_compute)
torch.einsum = wrapFunc(torch.einsum, _einsum_flops_compute)
torch.baddbmm = wrapFunc(torch.baddbmm, _tensor_addmm_flops_compute)
def _reload_functionals():
# torch.nn.functional does not support importlib.reload()
F.linear = old_functions[F.linear.__str__]
F.conv1d = old_functions[F.conv1d.__str__]
F.conv2d = old_functions[F.conv2d.__str__]
F.conv3d = old_functions[F.conv3d.__str__]
F.conv_transpose1d = old_functions[F.conv_transpose1d.__str__]
F.conv_transpose2d = old_functions[F.conv_transpose2d.__str__]
F.conv_transpose3d = old_functions[F.conv_transpose3d.__str__]
F.relu = old_functions[F.relu.__str__]
F.prelu = old_functions[F.prelu.__str__]
F.elu = old_functions[F.elu.__str__]
F.leaky_relu = old_functions[F.leaky_relu.__str__]
F.relu6 = old_functions[F.relu6.__str__]
if hasattr(F, "silu"):
F.silu = old_functions[F.silu.__str__]
F.gelu = old_functions[F.gelu.__str__]
F.batch_norm = old_functions[F.batch_norm.__str__]
F.layer_norm = old_functions[F.layer_norm.__str__]
F.instance_norm = old_functions[F.instance_norm.__str__]
F.group_norm = old_functions[F.group_norm.__str__]
F.avg_pool1d = old_functions[F.avg_pool1d.__str__]
F.avg_pool2d = old_functions[F.avg_pool2d.__str__]
F.avg_pool3d = old_functions[F.avg_pool3d.__str__]
F.max_pool1d = old_functions[F.max_pool1d.__str__]
F.max_pool2d = old_functions[F.max_pool2d.__str__]
F.max_pool3d = old_functions[F.max_pool3d.__str__]
F.adaptive_avg_pool1d = old_functions[F.adaptive_avg_pool1d.__str__]
F.adaptive_avg_pool2d = old_functions[F.adaptive_avg_pool2d.__str__]
F.adaptive_avg_pool3d = old_functions[F.adaptive_avg_pool3d.__str__]
F.adaptive_max_pool1d = old_functions[F.adaptive_max_pool1d.__str__]
F.adaptive_max_pool2d = old_functions[F.adaptive_max_pool2d.__str__]
F.adaptive_max_pool3d = old_functions[F.adaptive_max_pool3d.__str__]
F.upsample = old_functions[F.upsample.__str__]
F.interpolate = old_functions[F.interpolate.__str__]
F.softmax = old_functions[F.softmax.__str__]
F.embedding = old_functions[F.embedding.__str__]
def _reload_tensor_methods():
torch.matmul = old_functions[torch.matmul.__str__]
torch.Tensor.matmul = old_functions[torch.Tensor.matmul.__str__]
torch.mm = old_functions[torch.mm.__str__]
torch.Tensor.mm = old_functions[torch.Tensor.mm.__str__]
torch.bmm = old_functions[torch.bmm.__str__]
torch.Tensor.bmm = old_functions[torch.Tensor.bmm.__str__]
torch.addmm = old_functions[torch.addmm.__str__]
torch.Tensor.addmm = old_functions[torch.Tensor.addmm.__str__]
torch.mul = old_functions[torch.mul.__str__]
torch.Tensor.mul = old_functions[torch.Tensor.mul.__str__]
torch.add = old_functions[torch.add.__str__]
torch.Tensor.add = old_functions[torch.Tensor.add.__str__]
torch.einsum = old_functions[torch.einsum.__str__]
torch.baddbmm = old_functions[torch.baddbmm.__str__]
def _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size):
# matrix matrix mult ih state and internal state
flops += w_ih.shape[0] * w_ih.shape[1]
# matrix matrix mult hh state and internal state
flops += w_hh.shape[0] * w_hh.shape[1]
if isinstance(rnn_module, (nn.RNN, nn.RNNCell)):
# add both operations
flops += rnn_module.hidden_size
elif isinstance(rnn_module, (nn.GRU, nn.GRUCell)):
# hadamard of r
flops += rnn_module.hidden_size
# adding operations from both states
flops += rnn_module.hidden_size * 3
# last two hadamard _product and add
flops += rnn_module.hidden_size * 3
elif isinstance(rnn_module, (nn.LSTM, nn.LSTMCell)):
# adding operations from both states
flops += rnn_module.hidden_size * 4
# two hadamard _product and add for C state
flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size
# final hadamard
flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size
return flops
def _rnn_forward_hook(rnn_module, input, output):
flops = 0
# input is a tuple containing a sequence to process and (optionally) hidden state
inp = input[0]
batch_size = inp.shape[0]
seq_length = inp.shape[1]
num_layers = rnn_module.num_layers
for i in range(num_layers):
w_ih = rnn_module.__getattr__("weight_ih_l" + str(i))
w_hh = rnn_module.__getattr__("weight_hh_l" + str(i))
if i == 0:
input_size = rnn_module.input_size
else:
input_size = rnn_module.hidden_size
flops = _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size)
if rnn_module.bias:
b_ih = rnn_module.__getattr__("bias_ih_l" + str(i))
b_hh = rnn_module.__getattr__("bias_hh_l" + str(i))
flops += b_ih.shape[0] + b_hh.shape[0]
flops *= batch_size
flops *= seq_length
if rnn_module.bidirectional:
flops *= 2
rnn_module.__flops__ += int(flops)
def _rnn_cell_forward_hook(rnn_cell_module, input, output):
flops = 0
inp = input[0]
batch_size = inp.shape[0]
w_ih = rnn_cell_module.__getattr__("weight_ih")
w_hh = rnn_cell_module.__getattr__("weight_hh")
input_size = inp.shape[1]
flops = _rnn_flops(flops, rnn_cell_module, w_ih, w_hh, input_size)
if rnn_cell_module.bias:
b_ih = rnn_cell_module.__getattr__("bias_ih")
b_hh = rnn_cell_module.__getattr__("bias_hh")
flops += b_ih.shape[0] + b_hh.shape[0]
flops *= batch_size
rnn_cell_module.__flops__ += int(flops)
MODULE_HOOK_MAPPING = {
# RNN
nn.RNN: _rnn_forward_hook,
nn.GRU: _rnn_forward_hook,
nn.LSTM: _rnn_forward_hook,
nn.RNNCell: _rnn_cell_forward_hook,
nn.LSTMCell: _rnn_cell_forward_hook,
nn.GRUCell: _rnn_cell_forward_hook,
}
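# Note: RNN-family modules are profiled with the dedicated forward hooks above because their
# computation is not expressed through the patched torch.nn.functional entry points, so the
# generic functional-counting path would not see it.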
def num_to_string(num, precision=2):
if num // 10**9 > 0:
return str(round(num / 10.0**9, precision)) + " G"
elif num // 10**6 > 0:
return str(round(num / 10.0**6, precision)) + " M"
elif num // 10**3 > 0:
return str(round(num / 10.0**3, precision)) + " K"
else:
return str(num)
def macs_to_string(macs, units=None, precision=2):
if units is None:
if macs // 10**9 > 0:
return str(round(macs / 10.0**9, precision)) + " GMACs"
elif macs // 10**6 > 0:
return str(round(macs / 10.0**6, precision)) + " MMACs"
elif macs // 10**3 > 0:
return str(round(macs / 10.0**3, precision)) + " KMACs"
else:
return str(macs) + " MACs"
else:
if units == "GMACs":
return str(round(macs / 10.0**9, precision)) + " " + units
elif units == "MMACs":
return str(round(macs / 10.0**6, precision)) + " " + units
elif units == "KMACs":
return str(round(macs / 10.0**3, precision)) + " " + units
else:
return str(macs) + " MACs"
def number_to_string(num, units=None, precision=2):
if units is None:
if num // 10**9 > 0:
return str(round(num / 10.0**9, precision)) + " G"
elif num // 10**6 > 0:
return str(round(num / 10.0**6, precision)) + " M"
elif num // 10**3 > 0:
return str(round(num / 10.0**3, precision)) + " K"
else:
return str(num) + " "
else:
if units == "G":
return str(round(num / 10.0**9, precision)) + " " + units
elif units == "M":
return str(round(num / 10.0**6, precision)) + " " + units
elif units == "K":
return str(round(num / 10.0**3, precision)) + " " + units
else:
return str(num) + " "
def flops_to_string(flops, units=None, precision=2):
if units is None:
if flops // 10**12 > 0:
return str(round(flops / 10.0**12, precision)) + " TFLOPS"
if flops // 10**9 > 0:
return str(round(flops / 10.0**9, precision)) + " GFLOPS"
elif flops // 10**6 > 0:
return str(round(flops / 10.0**6, precision)) + " MFLOPS"
elif flops // 10**3 > 0:
return str(round(flops / 10.0**3, precision)) + " KFLOPS"
else:
return str(flops) + " FLOPS"
else:
if units == "TFLOPS":
return str(round(flops / 10.0**12, precision)) + " " + units
if units == "GFLOPS":
return str(round(flops / 10.0**9, precision)) + " " + units
elif units == "MFLOPS":
return str(round(flops / 10.0**6, precision)) + " " + units
elif units == "KFLOPS":
return str(round(flops / 10.0**3, precision)) + " " + units
else:
return str(flops) + " FLOPS"
def params_to_string(params_num, units=None, precision=2):
if units is None:
if params_num // 10**6 > 0:
return str(round(params_num / 10**6, precision)) + " M"
elif params_num // 10**3 > 0:
return str(round(params_num / 10**3, precision)) + " k"
else:
return str(params_num)
else:
if units == "M":
return str(round(params_num / 10.0**6, precision)) + " " + units
elif units == "K":
return str(round(params_num / 10.0**3, precision)) + " " + units
else:
return str(params_num)
def duration_to_string(duration, units=None, precision=2):
if units is None:
if duration > 1:
return str(round(duration, precision)) + " s"
elif duration * 10**3 > 1:
return str(round(duration * 10**3, precision)) + " ms"
elif duration * 10**6 > 1:
return str(round(duration * 10**6, precision)) + " us"
else:
return str(duration)
else:
if units == "us":
return str(round(duration * 10.0**6, precision)) + " " + units
elif units == "ms":
return str(round(duration * 10.0**3, precision)) + " " + units
else:
return str(round(duration, precision)) + " s"
# cannot iterate over all submodules using self.model.modules()
# since modules() returns duplicate modules only once
def get_module_flops(module):
sum = module.__flops__
# iterate over immediate children modules
for child in module.children():
sum += get_module_flops(child)
return sum
def get_module_macs(module):
sum = module.__macs__
# iterate over immediate children modules
for child in module.children():
sum += get_module_macs(child)
return sum
def get_module_duration(module):
duration = module.__duration__
if duration == 0: # e.g. ModuleList
for m in module.children():
duration += m.__duration__
return duration
def get_model_profile(
model,
input_shape=None,
args=[],
kwargs={},
print_profile=True,
detailed=True,
module_depth=-1,
top_modules=1,
warm_up=1,
as_string=True,
output_file=None,
ignore_modules=None,
):
"""Returns the total floating-point operations, MACs, and parameters of a model.
Example:
.. code-block:: python
model = torchvision.models.alexnet()
batch_size = 256
flops, macs, params = get_model_profile(model=model, input_shape=(batch_size, 3, 224, 224))
Args:
model ([torch.nn.Module]): the PyTorch model to be profiled.
input_shape (tuple): input shape to the model. If specified, the model takes a tensor with this shape as the only positional argument.
args (list): list of positional arguments to the model.
kwargs (dict): dictionary of keyword arguments to the model.
print_profile (bool, optional): whether to print the model profile. Defaults to True.
detailed (bool, optional): whether to print the detailed model profile. Defaults to True.
module_depth (int, optional): the depth into the nested modules. Defaults to -1 (the innermost modules).
top_modules (int, optional): the number of top modules to print in the aggregated profile. Defaults to 1.
warm_up (int, optional): the number of warm-up steps before measuring the latency of each module. Defaults to 1.
as_string (bool, optional): whether to return the flops, MACs, and params as formatted strings. Defaults to True.
output_file (str, optional): path to the output file. If None, the profiler prints to stdout.
ignore_modules (list, optional): the list of modules to ignore during profiling. Defaults to None.
Returns:
The number of floating-point operations, multiply-accumulate operations (MACs), and parameters in the model.
"""
assert isinstance(model, nn.Module), "model must be a PyTorch module"
prof = FlopsProfiler(model)
model.eval()
if input_shape is not None:
assert type(input_shape) is tuple, "input_shape must be a tuple"
assert len(input_shape) >= 1, "input_shape must have at least one element"
try:
input = torch.ones(()).new_empty(
(*input_shape, ),
dtype=next(model.parameters()).dtype,
device=next(model.parameters()).device,
)
except StopIteration:
input = torch.ones(()).new_empty((*input_shape, ))
args = [input]
assert (len(args) > 0) or (len(kwargs) > 0), "args and/or kwargs must be specified if input_shape is None"
for _ in range(warm_up):
if kwargs:
_ = model(*args, **kwargs)
else:
_ = model(*args)
prof.start_profile(ignore_list=ignore_modules)
if kwargs:
_ = model(*args, **kwargs)
else:
_ = model(*args)
flops = prof.get_total_flops()
macs = prof.get_total_macs()
params = prof.get_total_params()
if print_profile:
prof.print_model_profile(profile_step=warm_up,
module_depth=module_depth,
top_modules=top_modules,
detailed=detailed,
output_file=output_file)
prof.end_profile()
if as_string:
return number_to_string(flops), macs_to_string(macs), params_to_string(params)
return flops, macs, params
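# A minimal, self-contained usage sketch for the standalone API (illustrative only: the toy
# model and input shape below are assumptions, not part of DeepSpeed). Guarded so it never
# runs when this module is imported.
if __name__ == "__main__":
    _demo_model = nn.Sequential(nn.Linear(128, 256), nn.ReLU(), nn.Linear(256, 10))
    _flops, _macs, _params = get_model_profile(
        model=_demo_model,
        input_shape=(4, 128),  # hypothetical batch of 4 feature vectors of size 128
        print_profile=False,
        as_string=True,
    )
    print(_flops, _macs, _params)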
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .constants import *
import copy
from ..runtime.config_utils import get_scalar_param, get_list_param
def get_compression_config(param_dict):
output = {}
if COMPRESSION_TRAINING not in param_dict.keys():
param_dict[COMPRESSION_TRAINING] = {}
sub_param_dict = param_dict[COMPRESSION_TRAINING]
output[WEIGHT_QUANTIZATION] = get_weight_quantization(sub_param_dict)
output[ACTIVATION_QUANTIZATION] = get_activation_quantization(sub_param_dict)
output[SPARSE_PRUNING] = get_sparse_pruning(sub_param_dict)
output[ROW_PRUNING] = get_row_pruning(sub_param_dict)
output[HEAD_PRUNING] = get_head_pruning(sub_param_dict)
output[CHANNEL_PRUNING] = get_channel_pruning(sub_param_dict)
output[LAYER_REDUCTION] = get_layer_reduction(sub_param_dict)
return output
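# Sketch of the deepspeed_config nesting this parser consumes (key names are the constants
# imported from .constants; the ellipses stand for the per-feature options parsed below and
# are illustrative, not exhaustive):
#
#   {
#       COMPRESSION_TRAINING: {
#           WEIGHT_QUANTIZATION:     {SHARED_PARAMETERS: {...}, DIFFERENT_GROUPS: {...}},
#           ACTIVATION_QUANTIZATION: {SHARED_PARAMETERS: {...}, DIFFERENT_GROUPS: {...}},
#           SPARSE_PRUNING: {...}, ROW_PRUNING: {...}, HEAD_PRUNING: {...},
#           CHANNEL_PRUNING: {...}, LAYER_REDUCTION: {...},
#       }
#   }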
def get_layer_reduction(param_dict):
output = {}
output[LAYER_REDUCTION_ENABLED] = LAYER_REDUCTION_ENABLED_DEFAULT
if get_layer_reduction_enabled(param_dict):
output[LAYER_REDUCTION_ENABLED] = get_layer_reduction_enabled(param_dict)
for key, val in get_layer_reduction_params(param_dict).items():
output[key] = val
return output
def get_layer_reduction_enabled(param_dict):
if LAYER_REDUCTION in param_dict.keys():
return get_scalar_param(param_dict[LAYER_REDUCTION], LAYER_REDUCTION_ENABLED, LAYER_REDUCTION_ENABLED_DEFAULT)
else:
return False
def get_layer_reduction_params(param_dict):
if LAYER_REDUCTION in param_dict.keys():
layer_reduction_params = copy.copy(param_dict[LAYER_REDUCTION])
layer_reduction_params.pop(LAYER_REDUCTION_ENABLED)
return layer_reduction_params
else:
return False
def get_quantize_enabled(param_dict):
if COMPRESSION_TRAINING not in param_dict.keys():
return False
sub_param_dict = param_dict[COMPRESSION_TRAINING]
output = get_weight_quantization_shared_parameters(sub_param_dict)
return output[WEIGHT_QUANTIZE_ENABLED]
def get_weight_quantization(param_dict):
output = {}
if WEIGHT_QUANTIZATION not in param_dict.keys():
param_dict[WEIGHT_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[WEIGHT_QUANTIZATION]
# shared parameters
output[SHARED_PARAMETERS] = get_weight_quantization_shared_parameters(sub_param_dict)
# each sub-group
if output[SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Weigh Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_weight_quantization_different_groups(sub_param_dict)
return output
def get_weight_quantization_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[WEIGHT_QUANTIZE_ENABLED] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ENABLED,
WEIGHT_QUANTIZE_ENABLED_DEFAULT)
output[WEIGHT_QUANTIZE_KERNEL] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_KERNEL,
WEIGHT_QUANTIZE_KERNEL_DEFAULT)
output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_SCHEDULE_OFFSET,
WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
output[WEIGHT_QUANTIZE_GROUPS] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_GROUPS,
WEIGHT_QUANTIZE_GROUPS_DEFAULT)
output[WEIGHT_QUANTIZE_VERBOSE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_VERBOSE,
WEIGHT_QUANTIZE_VERBOSE_DEFAULT)
output[WEIGHT_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_TYPE,
WEIGHT_QUANTIZE_TYPE_DEFAULT)
output[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] = get_scalar_param(sub_param_dict,
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED,
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT)
assert output[WEIGHT_QUANTIZE_TYPE] in [
WEIGHT_QUANTIZE_SYMMETRIC, WEIGHT_QUANTIZE_ASYMMETRIC
], f"Invalid weight quantize type. Supported types: [{WEIGHT_QUANTIZE_SYMMETRIC}, {WEIGHT_QUANTIZE_ASYMMETRIC}]"
output[WEIGHT_QUANTIZE_ROUNDING] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ROUNDING,
WEIGHT_QUANTIZE_ROUNDING_DEFAULT)
assert output[WEIGHT_QUANTIZE_ROUNDING] in [
WEIGHT_QUANTIZE_NEAREST_ROUNDING, WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING
], f"Invalid weight quantize rounding. Supported types: [{WEIGHT_QUANTIZE_NEAREST_ROUNDING}, {WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING}]"
if WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE in sub_param_dict.keys():
output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = get_scalar_param(
sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED,
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT)
output[WEIGHT_QUANTIZE_CHANGE_RATIO] = get_scalar_param(
sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_CHANGE_RATIO,
WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT)
else:
output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
else:
output[WEIGHT_QUANTIZE_ENABLED] = WEIGHT_QUANTIZE_ENABLED_DEFAULT
output[WEIGHT_QUANTIZE_KERNEL] = WEIGHT_QUANTIZE_KERNEL_DEFAULT
output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
output[WEIGHT_QUANTIZE_GROUPS] = WEIGHT_QUANTIZE_GROUPS_DEFAULT
output[WEIGHT_QUANTIZE_VERBOSE] = WEIGHT_QUANTIZE_VERBOSE_DEFAULT
output[WEIGHT_QUANTIZE_TYPE] = WEIGHT_QUANTIZE_TYPE_DEFAULT
output[WEIGHT_QUANTIZE_ROUNDING] = WEIGHT_QUANTIZE_ROUNDING_DEFAULT
output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
return output
def get_weight_quantization_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert WEIGHT_QUANTIZE_START_BITS in group_dict.keys(
), f"{WEIGHT_QUANTIZE_START_BITS} must be specified for weight quantization group {name}"
assert WEIGHT_QUANTIZE_TARGET_BITS in group_dict.keys(
), f"{WEIGHT_QUANTIZE_TARGET_BITS} must be specified for weight quantization group {name}"
group_dict[WEIGHT_QUANTIZATION_PERIOD] = get_scalar_param(group_dict, WEIGHT_QUANTIZATION_PERIOD,
WEIGHT_QUANTIZATION_PERIOD_DEFAULT)
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
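# Shape of one parsed entry in the returned DIFFERENT_GROUPS mapping (a hedged sketch derived
# from get_params above; concrete values are illustrative):
#
#   output["<group name>"] = {
#       DIFFERENT_GROUPS_PARAMETERS: {
#           WEIGHT_QUANTIZE_START_BITS: ...,      # required
#           WEIGHT_QUANTIZE_TARGET_BITS: ...,     # required
#           WEIGHT_QUANTIZATION_PERIOD: ...,      # defaulted if absent
#       },
#       DIFFERENT_GROUPS_MODULE_SCOPE: ...,
#       DIFFERENT_GROUPS_RELATED_MODULE_SCOPE: ...,
#   }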
def get_activation_quantization(param_dict):
output = {}
if ACTIVATION_QUANTIZATION not in param_dict.keys():
param_dict[ACTIVATION_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[ACTIVATION_QUANTIZATION]
# shared parameters
output[SHARED_PARAMETERS] = get_activation_quantization_shared_parameters(sub_param_dict)
# each sub-group
if output[SHARED_PARAMETERS][ACTIVATION_QUANTIZATION_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Activation Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_activation_quantization_different_groups(sub_param_dict)
return output
def get_activation_quantization_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[ACTIVATION_QUANTIZATION_ENABLED] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZATION_ENABLED,
ACTIVATION_QUANTIZATION_ENABLED_DEFAULT)
output[ACTIVATION_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_TYPE,
ACTIVATION_QUANTIZE_TYPE_DEFAULT)
assert output[ACTIVATION_QUANTIZE_TYPE] in [
ACTIVATION_QUANTIZE_SYMMETRIC, ACTIVATION_QUANTIZE_ASYMMETRIC
], f"Invalid activation quantize type. Supported types: [{ACTIVATION_QUANTIZE_SYMMETRIC}, {ACTIVATION_QUANTIZE_ASYMMETRIC}]"
output[ACTIVATION_QUANTIZE_RANGE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_RANGE,
ACTIVATION_QUANTIZE_RANGE_DEFAULT)
assert output[ACTIVATION_QUANTIZE_RANGE] in [
ACTIVATION_QUANTIZE_RANGE_DYNAMIC, ACTIVATION_QUANTIZE_RANGE_STATIC
], f"Invalid activation quantize range calibration. Supported types: [{ACTIVATION_QUANTIZE_RANGE_DYNAMIC}, {ACTIVATION_QUANTIZE_RANGE_STATIC}]"
output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict,
ACTIVATION_QUANTIZE_SCHEDULE_OFFSET,
ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
else:
output[ACTIVATION_QUANTIZATION_ENABLED] = ACTIVATION_QUANTIZATION_ENABLED_DEFAULT
output[ACTIVATION_QUANTIZE_TYPE] = ACTIVATION_QUANTIZE_TYPE_DEFAULT
output[ACTIVATION_QUANTIZE_RANGE] = ACTIVATION_QUANTIZE_RANGE_DEFAULT
output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
return output
def get_activation_quantization_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert ACTIVATION_QUANTIZE_BITS in group_dict.keys(
), f"{ACTIVATION_QUANTIZE_BITS} must be specified for activation quantization group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
def get_sparse_pruning(param_dict):
output = {}
if SPARSE_PRUNING not in param_dict.keys():
param_dict[SPARSE_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[SPARSE_PRUNING]
# shared parameters
output[SHARED_PARAMETERS] = get_sparse_pruning_shared_parameters(sub_param_dict)
# each sub-group
if output[SHARED_PARAMETERS][SPARSE_PRUNING_ENABLED] and output[SHARED_PARAMETERS][
SPARSE_PRUNING_METHOD] != SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Sparse Pruning is enabled and not snip_momentum method, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_sparse_pruning_different_groups(sub_param_dict)
return output
def get_sparse_pruning_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[SPARSE_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_ENABLED,
SPARSE_PRUNING_ENABLED_DEFAULT)
output[SPARSE_PRUNING_METHOD] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_METHOD,
SPARSE_PRUNING_METHOD_DEFAULT)
assert output[SPARSE_PRUNING_METHOD] in [
SPARSE_PRUNING_METHOD_L1, SPARSE_PRUNING_METHOD_TOPK, SPARSE_PRUNING_METHOD_SNIP_MOMENTUM
], f"Invalid sparse pruning method. Supported types: [{SPARSE_PRUNING_METHOD_L1}, {SPARSE_PRUNING_METHOD_TOPK}, {SPARSE_PRUNING_METHOD_SNIP_MOMENTUM}]"
output[SPARSE_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET,
SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT)
if output[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
output[SPARSE_PRUNING_BLOCK_PATTERN] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_BLOCK_PATTERN,
SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT)
output[SPARSE_PRUNING_DENSE_RATIO] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_DENSE_RATIO,
SPARSE_PRUNING_DENSE_RATIO_DEFAULT)
assert output[SPARSE_PRUNING_DENSE_RATIO] > 0 and output[
SPARSE_PRUNING_DENSE_RATIO] < 1, "Invalid dense_ratio value. Must be within (0, 1)"
output[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE] = get_scalar_param(
sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE_DEFAULT)
output[SPARSE_PRUNING_EXCLUDED_MODULES] = get_list_param(sub_param_dict, SPARSE_PRUNING_EXCLUDED_MODULES,
SPARSE_PRUNING_EXCLUDED_MODULES_DEFAULT)
output[SPARSE_PRUNING_SCHEDULE_OFFSET_END] = get_scalar_param(sub_param_dict,
SPARSE_PRUNING_SCHEDULE_OFFSET_END,
output[SPARSE_PRUNING_SCHEDULE_OFFSET])
assert output[SPARSE_PRUNING_SCHEDULE_OFFSET] <= output[
SPARSE_PRUNING_SCHEDULE_OFFSET_END], "Invalid schedule offsets: schedule_offset must not exceed schedule_offset_end"
else:
output[SPARSE_PRUNING_ENABLED] = SPARSE_PRUNING_ENABLED_DEFAULT
output[SPARSE_PRUNING_METHOD] = SPARSE_PRUNING_METHOD_DEFAULT
output[SPARSE_PRUNING_SCHEDULE_OFFSET] = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT
return output
def get_sparse_pruning_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert SPARSE_PRUNING_DENSE_RATIO in group_dict.keys(
), f"{SPARSE_PRUNING_DENSE_RATIO} must be specified for sparse pruning group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
def get_row_pruning(param_dict):
output = {}
if ROW_PRUNING not in param_dict.keys():
param_dict[ROW_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[ROW_PRUNING]
# shared parameters
output[SHARED_PARAMETERS] = get_row_pruning_shared_parameters(sub_param_dict)
# each sub-group
if output[SHARED_PARAMETERS][ROW_PRUNING_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Row Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_row_pruning_different_groups(sub_param_dict)
return output
def get_row_pruning_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[ROW_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, ROW_PRUNING_ENABLED,
ROW_PRUNING_ENABLED_DEFAULT)
output[ROW_PRUNING_METHOD] = get_scalar_param(sub_param_dict, ROW_PRUNING_METHOD, ROW_PRUNING_METHOD_DEFAULT)
assert output[ROW_PRUNING_METHOD] in [
ROW_PRUNING_METHOD_L1, ROW_PRUNING_METHOD_TOPK
], f"Invalid row pruning method. Supported types: [{ROW_PRUNING_METHOD_L1}, {ROW_PRUNING_METHOD_TOPK}]"
output[ROW_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, ROW_PRUNING_SCHEDULE_OFFSET,
ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT)
else:
output[ROW_PRUNING_ENABLED] = ROW_PRUNING_ENABLED_DEFAULT
output[ROW_PRUNING_METHOD] = ROW_PRUNING_METHOD_DEFAULT
output[ROW_PRUNING_SCHEDULE_OFFSET] = ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT
return output
def get_row_pruning_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert ROW_PRUNING_DENSE_RATIO in group_dict.keys(
), f"{ROW_PRUNING_DENSE_RATIO} must be specified for row pruning group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
def get_head_pruning(param_dict):
output = {}
if HEAD_PRUNING not in param_dict.keys():
param_dict[HEAD_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[HEAD_PRUNING]
# shared parameters
output[SHARED_PARAMETERS] = get_head_pruning_shared_parameters(sub_param_dict)
# each sub-group
if output[SHARED_PARAMETERS][HEAD_PRUNING_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Head Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_head_pruning_different_groups(sub_param_dict)
return output
def get_head_pruning_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[HEAD_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, HEAD_PRUNING_ENABLED,
HEAD_PRUNING_ENABLED_DEFAULT)
output[HEAD_PRUNING_METHOD] = get_scalar_param(sub_param_dict, HEAD_PRUNING_METHOD,
HEAD_PRUNING_METHOD_DEFAULT)
assert output[HEAD_PRUNING_METHOD] in [
HEAD_PRUNING_METHOD_L1, HEAD_PRUNING_METHOD_TOPK
], f"Invalid head pruning method. Supported types: [{HEAD_PRUNING_METHOD_L1}, {HEAD_PRUNING_METHOD_TOPK}]"
output[HEAD_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, HEAD_PRUNING_SCHEDULE_OFFSET,
HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT)
if output[HEAD_PRUNING_ENABLED]:
assert HEAD_PRUNING_NUM_HEADS in sub_param_dict.keys(
), f"{HEAD_PRUNING_NUM_HEADS} must be specified for head pruning"
output[HEAD_PRUNING_NUM_HEADS] = sub_param_dict[HEAD_PRUNING_NUM_HEADS]
else:
output[HEAD_PRUNING_ENABLED] = HEAD_PRUNING_ENABLED_DEFAULT
output[HEAD_PRUNING_METHOD] = HEAD_PRUNING_METHOD_DEFAULT
output[HEAD_PRUNING_SCHEDULE_OFFSET] = HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT
return output
def get_head_pruning_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert HEAD_PRUNING_DENSE_RATIO in group_dict.keys(
), f"dense_ratio must be specified for head pruning group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
def get_channel_pruning(param_dict):
output = {}
if CHANNEL_PRUNING not in param_dict.keys():
param_dict[CHANNEL_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[CHANNEL_PRUNING]
# shared parameters
output[SHARED_PARAMETERS] = get_channel_pruning_shared_parameters(sub_param_dict)
    # each sub-group
if output[SHARED_PARAMETERS][CHANNEL_PRUNING_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Sparse Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_channel_pruning_different_groups(sub_param_dict)
return output
def get_channel_pruning_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[CHANNEL_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_ENABLED,
CHANNEL_PRUNING_ENABLED_DEFAULT)
output[CHANNEL_PRUNING_METHOD] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_METHOD,
CHANNEL_PRUNING_METHOD_DEFAULT)
assert output[CHANNEL_PRUNING_METHOD] in [
CHANNEL_PRUNING_METHOD_L1, CHANNEL_PRUNING_METHOD_TOPK
], f"Invalid channel pruning method. Supported types: [{CHANNEL_PRUNING_METHOD_L1}, {CHANNEL_PRUNING_METHOD_TOPK}]"
output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_SCHEDULE_OFFSET,
CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT)
else:
output[CHANNEL_PRUNING_ENABLED] = CHANNEL_PRUNING_ENABLED_DEFAULT
output[CHANNEL_PRUNING_METHOD] = CHANNEL_PRUNING_METHOD_DEFAULT
output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT
return output
def get_channel_pruning_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert CHANNEL_PRUNING_DENSE_RATIO in group_dict.keys(
), f"{CHANNEL_PRUNING_DENSE_RATIO} must be specified for channel pruning group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
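# Illustrative sketch (not executed): a hypothetical "channel_pruning" fragment of the
# "compression_training" section that the helpers above would parse. Key names follow
# constants.py; the group name, module pattern and dense_ratio are made-up examples.
#
# "channel_pruning": {
#     "shared_parameters": {
#         "enabled": true,
#         "method": "l1",
#         "schedule_offset": 1000
#     },
#     "different_groups": {
#         "cp_group_1": {
#             "params": {"dense_ratio": 0.5},
#             "modules": ["conv2d_layer"],
#             "related_modules": null
#         }
#     }
# }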
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import math
from torch import nn
from torch.nn import init
import deepspeed.comm as dist
from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer
from deepspeed.utils import logger
g_mpu = None
class QuantAct(nn.Module):
"""
    Class to quantize given activations. Note that when using this module, the input activation
    quantization range is fixed for all tokens/images at inference time. This generally costs a
    small amount of accuracy in exchange for better latency.
    Parameters:
    ----------
    act_range_momentum : float, default 0.95
        Momentum for updating the activation quantization range.
    quant_mode : str, default 'symmetric'
        The quantization mode, either 'symmetric' or 'asymmetric'.
"""
def __init__(self, act_range_momentum=0.95, quant_mode='symmetric'):
super(QuantAct, self).__init__()
self.act_range_momentum = act_range_momentum
self.quant_mode = quant_mode
if quant_mode == 'symmetric':
self.act_function = SymQuantizer.apply
else:
self.act_function = AsymQuantizer.apply
self.register_buffer('x_min_max', torch.zeros(2))
def forward(self, x, num_bits, *args):
"""
        x: the activation to be quantized
        num_bits: the number of bits to quantize the activation to
        *args: unused extra arguments, kept only to match the interface of the other quantization functions
"""
if self.training:
x_min = x.data.min()
x_max = x.data.max()
# Initialization
if self.x_min_max[0] == self.x_min_max[1]:
self.x_min_max[0] = x_min
self.x_min_max[1] = x_max
            # to disable the momentum update, set self.act_range_momentum = 0
self.x_min_max[0] = self.x_min_max[0] * self.act_range_momentum + x_min * (1 - self.act_range_momentum)
self.x_min_max[1] = self.x_min_max[1] * self.act_range_momentum + x_max * (1 - self.act_range_momentum)
x_q = self.act_function(x, num_bits, self.x_min_max[0], self.x_min_max[1])
return x_q
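# A minimal usage sketch (hypothetical shapes, kept as a comment so the module stays import-safe):
# quantize an activation tensor to 8 bits with a momentum-tracked range. In training mode the
# x_min_max buffer is updated; in eval mode the stored (calibrated) range is reused.
#
# quant_act = QuantAct(act_range_momentum=0.95, quant_mode='symmetric')
# quant_act.train()
# x = torch.randn(4, 16)
# x_q = quant_act(x, 8)        # updates the running min/max and returns the quantized activation
# quant_act.eval()
# x_q_eval = quant_act(x, 8)   # reuses the stored range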
class Embedding_Compress(nn.Embedding):
def __init__(self, *kargs):
super(Embedding_Compress, self).__init__(*kargs)
self.weight.start_bits = None
self.weight.target_bits = None
self.weight.q_period = None
self.weight_quantization_enabled_in_forward = False
self.weight_quantization_enabled = False
def extra_repr(self):
return 'num_embeddings={}, embedding_dim={}, weight_quantization={}'.format(
self.num_embeddings, self.embedding_dim, self.weight.target_bits)
def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
weight_quantization_enabled_in_forward, quantization_type, num_groups):
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = quantization_period
self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
if self.weight_quantization_enabled_in_forward:
logger.warning(
"************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
)
if self.weight.target_bits >= 3:
if quantization_type == 'symmetric':
self.weight_quantizer = SymQuantizer.apply
else:
self.weight_quantizer = AsymQuantizer.apply
elif self.weight.target_bits == 2:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
self.weight_quantizer = TernaryQuantizer.apply
elif self.weight.target_bits == 1:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
self.weight_quantizer = BinaryQuantizer.apply
# for embedding, we always use token-wise quantization
self.weight_quantize_num_groups = self.weight.size(0)
def fix_weight_quantization(self):
self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups).data
self.weight_quantization_enabled_in_forward = False
return None
def forward(self, input):
if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups)
else:
weight = self.weight
out = nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type,
self.scale_grad_by_freq, self.sparse)
return out
class LinearLayer_Compress(nn.Linear):
"""
Linear layer with compression.
"""
def __init__(self, *kargs, bias=True):
super(LinearLayer_Compress, self).__init__(*kargs, bias=bias)
self.sparse_pruning_method = None
self.row_pruning_method = None
self.head_pruning_method = None
self.activation_quantization_method = None
self.weight.start_bits = None
self.weight.target_bits = None
self.weight.q_period = None
self.weight_quantization_enabled_in_forward = False
self.weight_quantization_enabled = False
self.sparse_pruning_enabled = False
self.row_pruning_enabled = False
self.head_pruning_enabled = False
self.activation_quantization_enabled = False
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}, sparse pruning={}, row pruning={}, head pruning={}, activation quantization={}, weight_quantization={}'.format(
self.in_features, self.out_features, self.bias is not None, self.sparse_pruning_method is not None, \
self.row_pruning_method is not None, self.head_pruning_method is not None, self.activation_quantization_method is not None, self.weight.target_bits)
def enable_sparse_pruning(self, ratio, method):
# Here, we support two cases: L1 norm based pruning and topk based pruning
self.sparse_pruning_ratio = ratio
self.sparse_pruning_method = method
if method == 'l1':
weight_norm = torch.abs(self.weight.data)
mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
mask = mask.view(self.weight.size())
mask = mask.to(self.weight.device)
elif method == 'topk':
self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('sparse_pruning_mask', mask)
def enable_row_pruning(self, ratio, method):
# Here, we support two cases: L1 norm based pruning and topk based pruning
self.row_pruning_ratio = ratio
self.row_pruning_method = method
if method == 'l1':
            # compute the l1 norm of each row (dim=1 reduces over the input features)
weight_norm = torch.norm(self.weight.data, p=1, dim=1)
mask = TopKBinarizer.apply(weight_norm, self.row_pruning_ratio, False)
mask = mask.view(-1, 1)
mask = mask.to(self.weight.device)
elif method == 'topk':
self.row_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1))
self.row_mask_scores.data = self.row_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.row_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('row_pruning_mask', mask)
def enable_head_pruning(self, ratio, method, num_heads):
# Here, we support only topk based pruning
self.num_heads = num_heads
self.head_pruning_ratio = ratio
self.head_pruning_method = method
if method not in ['topk']:
raise NotImplementedError
else:
self.head_pruning_ratio = ratio
self.head_pruning_scores = nn.Parameter(torch.Tensor(1,
self.num_heads)) # we apply the pruning to O matrix
self.head_pruning_scores.data = self.head_pruning_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.head_pruning_scores, a=math.sqrt(5))
def fix_sparse_pruning_helper(self):
mask = self.get_mask(pruning_type='sparse')
self.weight.data = self.weight.data * mask
del self.sparse_pruning_mask
if self.sparse_pruning_method == 'topk':
del self.sparse_mask_scores
self.sparse_pruning_method = None
self.sparse_pruning_enabled = False
return None
def fix_row_col_pruning_helper(self, mask=None, dim_reduction=False):
        # This function is used for row/column pruning.
        # In particular, if we have two back-to-back layers F1 and F2, then when we remove
        # rows from F1 we also need to remove the corresponding columns from F2.
        # However, if we only have one layer F1, we only need to mask the pruned rows of F1 as 0.
if mask is None:
mask = self.get_mask(pruning_type='row').bool()
if dim_reduction:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[mask.view(-1), :])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
if self.bias is not None:
self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
self.out_features = self.weight.size(0)
else:
self.weight.data = self.weight.data * mask.view(-1, 1)
if self.bias is not None:
self.bias.data = self.bias.data * mask.view(-1)
del self.row_pruning_mask
if self.row_pruning_method == 'topk':
del self.row_mask_scores
self.row_pruning_method = None
else:
# this is generally for column pruning
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[:, mask.view(-1)])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
self.in_features = self.weight.size(1)
mask = None
self.row_pruning_enabled = False
return mask
def fix_head_pruning_helper(self, mask=None, num_heads=None, dim_reduction=False):
        # Similar to row/col pruning, head pruning also needs to prune the QKV matrices that are associated with the O matrix
num_heads = num_heads if num_heads else self.num_heads
if mask is None:
if self.head_pruning_method == 'topk':
mask = self.get_mask(pruning_type='head').bool()
if dim_reduction:
shape = self.weight.size(0)
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data.t().reshape(num_heads,
-1)[mask.view(-1), :].reshape(-1,
shape).t())
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
else:
shape = self.weight.size()
self.weight.data = (self.weight.data.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(
shape[1], shape[0]).t()
if self.head_pruning_method == 'topk':
del self.head_pruning_scores
self.head_pruning_method = None
else:
raise NotImplementedError
else:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
shape = self.weight.size(1)
self.weight = nn.Parameter(self.weight.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1, shape))
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
if self.bias is not None:
self.bias = nn.Parameter(self.bias.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1))
self.head_pruning_enabled = False
return mask
def get_mask(self, pruning_type='row'):
if pruning_type == 'sparse':
if self.sparse_pruning_method == 'l1':
return self.sparse_pruning_mask.to(self.weight.device)
elif self.sparse_pruning_method == 'topk':
return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
else:
raise NotImplementedError
if pruning_type == 'row':
if self.row_pruning_method == 'l1':
return self.row_pruning_mask.to(self.weight.device)
elif self.row_pruning_method == 'topk':
return TopKBinarizer.apply(self.row_mask_scores, self.row_pruning_ratio, False)
else:
raise NotImplementedError
elif pruning_type == 'head':
if self.head_pruning_method == 'topk':
return TopKBinarizer.apply(self.head_pruning_scores, self.head_pruning_ratio, False)
else:
raise NotImplementedError
else:
raise NotImplementedError
def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
weight_quantization_enabled_in_forward, quantization_type, num_groups):
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = quantization_period
self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
if self.weight_quantization_enabled_in_forward:
logger.warning(
"************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
)
if self.weight.target_bits >= 3:
if quantization_type == 'symmetric':
self.weight_quantizer = SymQuantizer.apply
else:
self.weight_quantizer = AsymQuantizer.apply
elif self.weight.target_bits == 2:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
self.weight_quantizer = TernaryQuantizer.apply
elif self.weight.target_bits == 1:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
self.weight_quantizer = BinaryQuantizer.apply
self.weight_quantize_num_groups = num_groups
def fix_weight_quantization(self):
self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups).data
self.weight_quantization_enabled_in_forward = False
return None
def enable_activation_quantization(self, bits, quantization_type, range_calibration):
assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
self.activation_quantization_bits = bits
self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
if range_calibration == 'static':
self.activation_quantizer = QuantAct(quant_mode=quantization_type)
else:
if quantization_type == 'symmetric':
self.activation_quantizer = SymQuantizer.apply
else:
self.activation_quantizer = AsymQuantizer.apply
def head_pruning_reshape(self, w, mask):
shape = w.shape
return (w.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(shape[1], shape[0]).t()
def forward(self, input, skip_bias_add=False):
if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups)
bias = self.bias
else:
weight = self.weight
bias = self.bias
if self.sparse_pruning_enabled and self.sparse_pruning_method:
mask = self.get_mask(pruning_type='sparse')
weight = weight * mask.view(self.weight.size())
if self.row_pruning_enabled and self.row_pruning_method:
mask = self.get_mask(pruning_type='row')
weight = weight * mask.view(-1, 1)
if bias is not None:
bias = bias * mask.view(-1)
if self.head_pruning_enabled and self.head_pruning_method:
mask = self.get_mask(pruning_type='head')
weight = self.head_pruning_reshape(weight, mask)
if self.activation_quantization_enabled:
if 'dynamic' in self.activation_quantization_method:
num_groups = input.numel() // input.size(-1)
else:
num_groups = 1
input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
if skip_bias_add:
# used for mpu linear layers
output = nn.functional.linear(input, weight, None)
return output, bias
else:
output = nn.functional.linear(input, weight, bias)
return output
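# Illustrative sketch (hypothetical sizes, kept as comments): row-prune a layer F1 and
# propagate the pruned dimension to a back-to-back layer F2 via fix_row_col_pruning_helper,
# as described in the comments of that method.
#
# f1 = LinearLayer_Compress(16, 8)   # F1: its output rows may be pruned
# f2 = LinearLayer_Compress(8, 4)    # F2: consumes F1's output
# f1.enable_row_pruning(0.5, 'l1')   # keep 50% of the rows (dense ratio)
# mask = f1.fix_row_col_pruning_helper(dim_reduction=True)        # F1 shrinks to 4 output rows
# f2.fix_row_col_pruning_helper(mask=mask, dim_reduction=True)    # F2 drops the matching input columns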
class Conv2dLayer_Compress(nn.Conv2d):
"""
Conv2D layer with compression.
"""
def __init__(self, *kargs):
super(Conv2dLayer_Compress, self).__init__(*kargs)
self.sparse_pruning_method = None
self.channel_pruning_method = None
self.activation_quantization_method = None
self.weight.start_bits = None
self.weight.target_bits = None
self.weight.q_period = None
self.weight_quantization_enabled_in_forward = False
self.sparse_pruning_enabled = False
self.channel_pruning_enabled = False
self.activation_quantization_enabled = False
def __repr__(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0, ) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1, ) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0, ) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
output = s.format(**self.__dict__)
return output + ' sparse pruning={}, channel pruning={}, activation quantization={}, weight_quantization={}'.format(
self.sparse_pruning_method is not None, self.channel_pruning_method is not None,
self.activation_quantization_method is not None, self.weight.target_bits)
def enable_sparse_pruning(self, ratio, method):
self.sparse_pruning_ratio = ratio
self.sparse_pruning_method = method
if method == 'l1':
weight_norm = torch.abs(self.weight.data)
mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
mask = mask.view(self.weight.size())
mask = mask.to(self.weight.device)
elif method == 'topk':
self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('sparse_pruning_mask', mask)
def enable_channel_pruning(self, ratio, method):
# Here, we support two cases: L1 norm based pruning and topk based pruning
self.channel_pruning_ratio = ratio
self.channel_pruning_method = method
if method == 'l1':
            # compute the l1 norm of each conv2d kernel (the last three dimensions)
weight_norm = torch.norm(self.weight.data, p=1, dim=[1, 2, 3])
mask = TopKBinarizer.apply(weight_norm, self.channel_pruning_ratio, False)
mask = mask.view(-1, 1, 1, 1)
mask = mask.to(self.weight.device)
elif method == 'topk':
self.channel_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1, 1, 1))
self.channel_mask_scores.data = self.channel_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.channel_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('channel_pruning_mask', mask)
def fix_sparse_pruning_helper(self):
mask = self.get_mask(pruning_type='sparse')
self.weight.data = self.weight.data * mask
del self.sparse_pruning_mask
if self.sparse_pruning_method == 'topk':
del self.sparse_mask_scores
self.sparse_pruning_method = None
self.sparse_pruning_enabled = False
return None
def fix_channel_pruning_helper(self, mask=None, dim_reduction=False):
if mask is None:
if self.channel_pruning_method in ['l1', 'topk']:
mask = self.get_mask(pruning_type='channel').bool()
if dim_reduction:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[mask.view(-1), ...])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
if self.bias is not None:
self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
else:
self.weight.data = self.weight.data * mask.view(-1, 1, 1, 1)
if self.bias is not None:
self.bias.data = self.bias.data * mask.view(-1)
del self.channel_pruning_mask
if self.channel_pruning_method == 'topk':
del self.channel_mask_scores
self.channel_pruning_method = None
else:
raise NotImplementedError
else:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[:, mask.view(-1), ...])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
mask = None
self.channel_pruning_enabled = False
return mask
def get_mask(self, pruning_type='sparse'):
if pruning_type == 'sparse':
if self.sparse_pruning_method == 'l1':
return self.sparse_pruning_mask.to(self.weight.device)
elif self.sparse_pruning_method == 'topk':
return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
else:
raise NotImplementedError
elif pruning_type == 'channel':
if self.channel_pruning_method == 'l1':
return self.channel_pruning_mask.to(self.weight.device)
elif self.channel_pruning_method == 'topk':
return TopKBinarizer.apply(self.channel_mask_scores, self.channel_pruning_ratio, False)
else:
raise NotImplementedError
else:
raise NotImplementedError
def fix_weight_quantization(self):
self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups).data
self.weight_quantization_enabled_in_forward = False
return None
def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
weight_quantization_enabled_in_forward, quantization_type, num_groups):
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = quantization_period
self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
if self.weight_quantization_enabled_in_forward:
assert self.weight.target_bits >= 4, 'Only >=4 bits weight quantization are supported during forward pass for now'
logger.warning(
"************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
)
if quantization_type == 'symmetric':
self.weight_quantizer = SymQuantizer.apply
else:
self.weight_quantizer = AsymQuantizer.apply
self.weight_quantize_num_groups = num_groups
def enable_activation_quantization(self, bits, quantization_type, range_calibration):
assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
self.activation_quantization_bits = bits
self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
if range_calibration == 'static':
self.activation_quantizer = QuantAct(quant_mode=quantization_type)
else:
if quantization_type == 'symmetric':
self.activation_quantizer = SymQuantizer.apply
else:
self.activation_quantizer = AsymQuantizer.apply
def forward(self, input):
if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups)
bias = self.bias
else:
weight = self.weight
bias = self.bias
if self.sparse_pruning_enabled and self.sparse_pruning_method:
mask = self.get_mask(pruning_type='sparse')
weight = weight * mask.view(self.weight.size())
if self.channel_pruning_enabled:
mask = self.get_mask(pruning_type='channel')
weight = weight * mask.view(-1, 1, 1, 1)
if bias is not None:
bias = bias * mask.view(-1)
if self.activation_quantization_enabled:
if 'dynamic' in self.activation_quantization_method:
num_groups = input.numel() // input[0].numel()
else:
num_groups = 1
input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
return nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
class BNLayer_Compress(nn.BatchNorm2d):
def fix_channel_pruning_helper(self, mask, dim_reduction=True):
self.weight = nn.Parameter(self.weight.data[mask.view(-1)])
self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
self.running_mean = self.running_mean[mask.view(-1)]
self.running_var = self.running_var[mask.view(-1)]
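# Illustrative sketch (hypothetical shapes, kept as comments): channel-prune a Conv2d layer
# and propagate the same mask to the following BatchNorm and Conv2d layers.
#
# conv1 = Conv2dLayer_Compress(3, 8, 3)
# bn1 = BNLayer_Compress(8)
# conv2 = Conv2dLayer_Compress(8, 16, 3)
# conv1.enable_channel_pruning(0.5, 'l1')                            # keep 50% of the output channels
# mask = conv1.fix_channel_pruning_helper(dim_reduction=True)        # conv1 keeps 4 output channels
# bn1.fix_channel_pruning_helper(mask)                               # shrink the BN affine params/statistics
# conv2.fix_channel_pruning_helper(mask=mask, dim_reduction=True)    # drop the matching input channels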
def _reduce(input_):
"""All-reduce the the input tensor across model parallel group."""
group = g_mpu.get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# All-reduce.
dist.all_reduce(input_, group=group)
return input_
def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False):
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
assert tensor.size()[last_dim] % num_partitions == 0
last_dim_size = tensor.size()[last_dim] // num_partitions
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
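# Minimal usage sketch (kept as a comment): split a (4, 6) tensor into 3 contiguous
# chunks of shape (4, 2) along the last dimension.
#
# t = torch.randn(4, 6)
# chunks = split_tensor_along_last_dim(t, 3, contiguous_split_chunks=True)
# assert all(c.shape == (4, 2) for c in chunks)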
def _split(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
group = g_mpu.get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# Split along last dimension.
world_size = dist.get_world_size(group=group)
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = dist.get_rank(group=group)
output = input_list[rank].contiguous()
return output
def _gather(input_):
"""Gather tensors and concatenate along the last dimension."""
group = g_mpu.get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# Size and dimension.
last_dim = input_.dim() - 1
rank = dist.get_rank(group=group)
world_size = dist.get_world_size(group=group)
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
dist.all_gather(tensor_list, input_, group=group)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output
class _CopyToModelParallelRegion(torch.autograd.Function):
"""Pass the input to the model parallel region."""
@staticmethod
def forward(ctx, input_):
return input_
@staticmethod
def backward(ctx, grad_output):
return _reduce(grad_output)
class _ReduceFromModelParallelRegion(torch.autograd.Function):
"""All-reduce the input from the model parallel region."""
@staticmethod
def forward(ctx, input_):
return _reduce(input_)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class _ScatterToModelParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chuck to the rank."""
@staticmethod
def forward(ctx, input_):
return _split(input_)
@staticmethod
def backward(ctx, grad_output):
return _gather(grad_output)
class _GatherFromModelParallelRegion(torch.autograd.Function):
"""Gather the input from model parallel region and concatenate."""
@staticmethod
def forward(ctx, input_):
return _gather(input_)
@staticmethod
def backward(ctx, grad_output):
return _split(grad_output)
# -----------------
# Helper functions.
# -----------------
def copy_to_model_parallel_region(input_):
return _CopyToModelParallelRegion.apply(input_)
def reduce_from_model_parallel_region(input_):
return _ReduceFromModelParallelRegion.apply(input_)
def scatter_to_model_parallel_region(input_):
return _ScatterToModelParallelRegion.apply(input_)
def gather_from_model_parallel_region(input_):
return _GatherFromModelParallelRegion.apply(input_)
class ColumnParallelLinear_Compress(LinearLayer_Compress):
def __init__(self, mpu, input_size, output_size, bias=True, gather_output=True, skip_bias_add=False):
# Keep input parameters
global g_mpu
g_mpu = mpu
self.input_size = input_size
self.output_size = output_size
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
# Divide the weight matrix along the last dimension.
world_size = mpu.get_model_parallel_world_size()
assert output_size % world_size == 0
self.output_size_per_partition = output_size // world_size
super(ColumnParallelLinear_Compress, self).__init__(self.input_size, self.output_size_per_partition, bias=bias)
def forward(self, input_):
# Set up backprop all-reduce.
input_parallel = copy_to_model_parallel_region(input_)
# Matrix multiply.
if self.skip_bias_add:
output_parallel, bias = super().forward(input_parallel, True)
else:
output_parallel = super().forward(input_parallel)
bias = None
if self.gather_output:
# All-gather across the partitions.
output = gather_from_model_parallel_region(output_parallel)
else:
output = output_parallel
return output, bias
class RowParallelLinear_Compress(LinearLayer_Compress):
def __init__(self, mpu, input_size, output_size, bias=True, input_is_parallel=False, skip_bias_add=False):
# Keep input parameters
global g_mpu
g_mpu = mpu
self.input_size = input_size
self.output_size = output_size
self.input_is_parallel = input_is_parallel
self.skip_bias_add = skip_bias_add
# Divide the weight matrix along the last dimension.
world_size = mpu.get_model_parallel_world_size()
assert input_size % world_size == 0
self.input_size_per_partition = input_size // world_size
super(RowParallelLinear_Compress, self).__init__(self.input_size_per_partition, self.output_size, bias=bias)
def forward(self, input_):
# Set up backprop all-reduce.
if self.input_is_parallel:
input_parallel = input_
else:
input_parallel = scatter_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel, bias = super().forward(input_parallel, True)
# All-reduce across all the partitions.
output_ = reduce_from_model_parallel_region(output_parallel)
if not self.skip_bias_add:
if bias is not None:
output = output_ + bias
else:
output = output_
output_bias = None
else:
output = output_
output_bias = bias
return output, output_bias
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#########################################
# Compression Methods
# It has several sub-components
#########################################
COMPRESSION_TRAINING = "compression_training"
SHARED_PARAMETERS = "shared_parameters"
DIFFERENT_GROUPS = "different_groups"
TECHNIQUE_ENABLED = "enabled"
TECHNIQUE_SCHEDULE_OFFSET = "schedule_offset"
TECHNIQUE_SCHEDULE_OFFSET_END = "schedule_offset_end"
DIFFERENT_GROUPS_PARAMETERS = "params"
DIFFERENT_GROUPS_MODULE_SCOPE = "modules"
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT = "*"
DIFFERENT_GROUPS_RELATED_MODULE_SCOPE = "related_modules"
DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT = None
# COMPRESSION_TRAINING_ENABLED = "enabled"
# COMPRESSION_TRAINING_ENABLED_DEFAULT = False
####
# Layer Reduction
####
LAYER_REDUCTION = "layer_reduction"
LAYER_REDUCTION_ENABLED = "enabled"
LAYER_REDUCTION_ENABLED_DEFAULT = False
KEEP_NUMBER_LAYER = "keep_number_layer"
MODULE_NAME_PREFIX = "module_name_prefix"
TEACHER_LAYER = "teacher_layer"
OTHER_MODULE_NAME = "other_module_name"
####
# Weight Quantization
####
WEIGHT_QUANTIZATION = "weight_quantization"
WEIGHT_QUANTIZATION_PERIOD = "quantization_period"
WEIGHT_QUANTIZATION_PERIOD_DEFAULT = 1
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED = "quantize_weight_in_forward"
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT = False
WEIGHT_QUANTIZE_ENABLED = TECHNIQUE_ENABLED
WEIGHT_QUANTIZE_ENABLED_DEFAULT = False
WEIGHT_QUANTIZE_KERNEL = "quantizer_kernel"
WEIGHT_QUANTIZE_KERNEL_DEFAULT = False
WEIGHT_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 0
WEIGHT_QUANTIZE_GROUPS = "quantize_groups"
WEIGHT_QUANTIZE_GROUPS_DEFAULT = 1
WEIGHT_QUANTIZE_VERBOSE = "quantize_verbose"
WEIGHT_QUANTIZE_VERBOSE_DEFAULT = False
WEIGHT_QUANTIZE_TYPE = "quantization_type"
WEIGHT_QUANTIZE_TYPE_DEFAULT = "symmetric"
WEIGHT_QUANTIZE_SYMMETRIC = "symmetric"
WEIGHT_QUANTIZE_ASYMMETRIC = "asymmetric"
WEIGHT_QUANTIZE_ROUNDING = "rounding"
WEIGHT_QUANTIZE_ROUNDING_DEFAULT = "nearest"
WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING = "stochastic"
WEIGHT_QUANTIZE_NEAREST_ROUNDING = "nearest"
# may be deleted for a cleaner version
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE = "fp16_mixed_quantize"
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED = "enabled"
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT = False
WEIGHT_QUANTIZE_CHANGE_RATIO = "quantize_change_ratio"
WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT = 0.001
WEIGHT_QUANTIZE_START_BITS = "start_bits"
WEIGHT_QUANTIZE_TARGET_BITS = "target_bits"
###
# Activation Quantization
###
ACTIVATION_QUANTIZATION = "activation_quantization"
ACTIVATION_QUANTIZATION_ENABLED = TECHNIQUE_ENABLED
ACTIVATION_QUANTIZATION_ENABLED_DEFAULT = False
ACTIVATION_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 1000
ACTIVATION_QUANTIZE_TYPE = "quantization_type"
ACTIVATION_QUANTIZE_TYPE_DEFAULT = "symmetric"
ACTIVATION_QUANTIZE_SYMMETRIC = "symmetric"
ACTIVATION_QUANTIZE_ASYMMETRIC = "asymmetric"
ACTIVATION_QUANTIZE_RANGE = 'range_calibration'
ACTIVATION_QUANTIZE_RANGE_DEFAULT = 'dynamic'
ACTIVATION_QUANTIZE_RANGE_STATIC = 'static'
ACTIVATION_QUANTIZE_RANGE_DYNAMIC = 'dynamic'
ACTIVATION_QUANTIZE_BITS = "bits"
###
# Sparse Pruning
###
SPARSE_PRUNING = "sparse_pruning"
SPARSE_PRUNING_ENABLED = TECHNIQUE_ENABLED
SPARSE_PRUNING_ENABLED_DEFAULT = False
SPARSE_PRUNING_METHOD = "method"
SPARSE_PRUNING_METHOD_DEFAULT = "l1"
SPARSE_PRUNING_METHOD_L1 = "l1"
SPARSE_PRUNING_METHOD_TOPK = "topk"
SPARSE_PRUNING_METHOD_SNIP_MOMENTUM = "snip_momentum"
SPARSE_PRUNING_BLOCK_PATTERN = "block_pattern"
SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT = "4x1"
SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE = "schedule_offset_stride"
SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE_DEFAULT = 1
SPARSE_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
SPARSE_PRUNING_SCHEDULE_OFFSET_END = TECHNIQUE_SCHEDULE_OFFSET_END
SPARSE_PRUNING_SCHEDULE_OFFSET_END_DEFAULT = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT
SPARSE_PRUNING_DENSE_RATIO = "dense_ratio"
SPARSE_PRUNING_DENSE_RATIO_DEFAULT = 0.1
SPARSE_PRUNING_EXCLUDED_MODULES = "excluded_modules"
SPARSE_PRUNING_EXCLUDED_MODULES_DEFAULT = []
###
# Row Pruning
###
ROW_PRUNING = "row_pruning"
ROW_PRUNING_ENABLED = TECHNIQUE_ENABLED
ROW_PRUNING_ENABLED_DEFAULT = False
ROW_PRUNING_METHOD = "method"
ROW_PRUNING_METHOD_DEFAULT = "l1"
ROW_PRUNING_METHOD_L1 = "l1"
ROW_PRUNING_METHOD_TOPK = "topk"
ROW_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
ROW_PRUNING_DENSE_RATIO = "dense_ratio"
###
# Head Pruning
###
HEAD_PRUNING = "head_pruning"
HEAD_PRUNING_ENABLED = TECHNIQUE_ENABLED
HEAD_PRUNING_ENABLED_DEFAULT = False
HEAD_PRUNING_METHOD = "method"
HEAD_PRUNING_METHOD_DEFAULT = "topk"
HEAD_PRUNING_METHOD_L1 = "l1"
HEAD_PRUNING_METHOD_TOPK = "topk"
HEAD_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
HEAD_PRUNING_NUM_HEADS = "num_heads"
HEAD_PRUNING_DENSE_RATIO = "dense_ratio"
###
# Channel Pruning
###
CHANNEL_PRUNING = "channel_pruning"
CHANNEL_PRUNING_ENABLED = TECHNIQUE_ENABLED
CHANNEL_PRUNING_ENABLED_DEFAULT = False
CHANNEL_PRUNING_METHOD = "method"
CHANNEL_PRUNING_METHOD_DEFAULT = "l1"
CHANNEL_PRUNING_METHOD_L1 = "l1"
CHANNEL_PRUNING_METHOD_TOPK = "topk"
CHANNEL_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
CHANNEL_PRUNING_DENSE_RATIO = "dense_ratio"
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .compress import init_compression, redundancy_clean
from .scheduler import compression_scheduler
from .helper import convert_conv1d_to_linear
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch import autograd
import math
class TopKBinarizer(autograd.Function):
"""
Top-k Binarizer.
Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
is among the k% highest values of S.
Implementation is inspired from:
https://github.com/yaozhewei/MLPruning
"""
@staticmethod
    def forward(ctx, inputs: torch.Tensor, threshold: float, sigmoid: bool):
"""
Args:
inputs (`torch.FloatTensor`)
The input matrix from which the binarizer computes the binary mask.
threshold (`float`)
                The fraction of weights to keep (the rest is pruned);
                a float between 0 and 1.
sigmoid (`bool`)
Whether to apply a sigmoid on the threshold
Returns:
mask (`torch.FloatTensor`)
Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
retained, 0 - the associated weight is pruned).
"""
# Get the subnetwork by sorting the inputs and using the top threshold
if sigmoid:
threshold = torch.sigmoid(threshold).item()
ctx.sigmoid = sigmoid
mask = inputs.clone()
_, idx = inputs.flatten().sort(descending=True)
j = math.ceil(threshold * inputs.numel())
# flat_out and mask access the same memory.
flat_out = mask.flatten()
flat_out[idx[j:]] = 0.
flat_out[idx[:j]] = 1.
ctx.save_for_backward(mask)
return mask
@staticmethod
def backward(ctx, gradOutput):
mask, = ctx.saved_tensors
if ctx.sigmoid:
return gradOutput.clone(), ((gradOutput * mask).sum()).view(-1), None
else:
return gradOutput.clone(), None, None
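# Minimal usage sketch (kept as a comment): keep the top 30% of scores as a {0, 1} mask.
# With sigmoid=False the threshold is used directly as the fraction of entries to keep.
#
# scores = torch.randn(10)
# mask = TopKBinarizer.apply(scores, 0.3, False)   # ceil(0.3 * 10) = 3 ones, 7 zeros
# assert mask.sum().item() == 3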
class SymQuantizer(torch.autograd.Function):
"""
Symmetric quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int, >=4)
Number of bits to use for quantization
min_value/max_value (torch.FloatTensor)
Used for static activation quantization
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
and num_groups == 1)
q_range = 2**num_bits
input_shape = input.shape
if min_value is None:
input = input.reshape(num_groups, -1)
max_input = torch.amax(torch.abs(input), dim=-1).view(num_groups, -1)
else:
max_input = torch.max(min_value.abs(), max_value).view(-1)
scale = 2 * max_input / q_range
output = (input / scale).round().clamp(-q_range // 2, q_range // 2 - 1) * scale
output = output.reshape(input_shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
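# Minimal usage sketch (kept as a comment): fake-quantize a tensor to 8 bits symmetrically,
# using one quantization group per row (group-wise dynamic range).
#
# x = torch.randn(4, 16)
# x_q = SymQuantizer.apply(x, 8, None, None, 4)   # 4 groups -> one scale per row
# assert x_q.shape == x.shape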
class AsymQuantizer(torch.autograd.Function):
"""
Asymmetric quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int, >=4)
Number of bits to use for quantization
min_value/max_value (torch.FloatTensor)
Used for static activation quantization
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
and num_groups == 1)
q_range = 2**num_bits
input_shape = input.shape
if min_value is None:
input = input.reshape(num_groups, -1)
min_value = input.amin(dim=-1, keepdim=True)
max_value = input.amax(dim=-1, keepdim=True)
scale = (max_value - min_value) / q_range
zero_point = (min_value / scale).round() * scale
output = ((input - zero_point) / scale).round().clamp(0, q_range - 1) * scale + zero_point
output = output.reshape(input_shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
class TernaryQuantizer(torch.autograd.Function):
"""
Ternary quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int)
Dummy variable
min_value/max_value (torch.FloatTensor)
Used for static activation quantization; for now they are dummy variable
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None)
input_flat = input.reshape(num_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1).div(n)
thres = (0.7 * m).view(-1, 1)
pos = (input_flat > thres).type(input.type())
neg = (input_flat < -thres).type(input.type())
mask = (input_flat.abs() > thres).type(input.type())
alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
output = alpha * pos - alpha * neg
output = output.reshape(input.shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
class BinaryQuantizer(torch.autograd.Function):
"""
Binary quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int)
Dummy variable
min_value/max_value (torch.FloatTensor)
Used for static activation quantization; for now they are dummy variable
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None)
input_flat = input.reshape(num_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1, keepdim=True).div(n)
output = input_flat.sign().mul(m)
output = output.reshape(input.shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import re
from .helper import compression_preparation, fix_compression, recursive_getattr, is_module_compressible
from .config import get_compression_config
from ..runtime.config_utils import dict_raise_error_on_duplicate_keys
from .constants import *
import os
import json
try:
import neural_compressor as nc
except ImportError as e:
nc = None
def check_deepspeed_config(config):
if isinstance(config, dict):
return config
elif os.path.exists(config):
return json.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
else:
raise ValueError(
f"Expected a string path to an existing deepspeed config, or a dictionary. Received: {config}")
def get_module_name(group_name, model, key_word, exist_module_name, mpu=None, verbose=True):
'''
    get the associated module names from the model based on the key_word provided by the user
'''
return_module_name = []
for name, module in model.named_modules():
module_check = is_module_compressible(module, mpu)
if re.search(key_word, name) is not None and module_check:
if name in exist_module_name and verbose:
# logger.warning
raise ValueError(
f"{name} is already added to compression, please check your config file for {group_name}.")
if name not in exist_module_name:
exist_module_name.add(name)
return_module_name.append(name)
return return_module_name, exist_module_name
def get_compress_methods(model, compress_methods, mpu=None):
# extract the compression module for each method in compress_methods
layer_added_compress_methods = []
for method, method_content in compress_methods.items():
if LAYER_REDUCTION in method:
continue
# for loop different methods, i.e., weight quantization, activation quantization etc
exist_module_name = set()
shared_parameters = method_content[SHARED_PARAMETERS] # get all the shared parameters
for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
# for loop different groups, i.e., weight quantization group 1, weight quantization group 2 etc
module_name_list = []
related_module_name_list = []
if method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]:
                # This is used for head/row/channel pruning: if users provide the related module scope, we can shrink the layer dimensions for them;
                # otherwise we just mask the pruned weights as zeros.
for key_word, related_key_words in zip(method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE],
method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]):
module_name, exist_module_name = get_module_name(group_name,
model,
key_word,
exist_module_name,
mpu=mpu)
module_name_list.append(module_name)
tmp_related_module_name_list = []
for rkw in related_key_words:
# related key word can be a list, for instance the QKV for O matrix in Attention
module_name, _ = get_module_name(group_name, model, rkw, set(), mpu=mpu)
tmp_related_module_name_list.append(module_name)
related_module_name_list.append(tmp_related_module_name_list)
else:
for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
module_name, exist_module_name = get_module_name(group_name,
model,
key_word,
exist_module_name,
mpu=mpu)
module_name_list.append(module_name)
if module_name_list:
# combine shared parameters with each group
combined_method_parameters = {
**(method_parameters.copy().pop(DIFFERENT_GROUPS_PARAMETERS)),
**shared_parameters
}
compression_item = [module_name_list, related_module_name_list, {method: combined_method_parameters}]
layer_added_compress_methods.append(compression_item)
return layer_added_compress_methods
def init_compression(model, deepspeed_config, teacher_model=None, mpu=None):
"""
    Compress a model: replace linear/conv2d layers with DeepSpeed compression-aware modules
Args:
model (`torch.nn.Module`)
The model to compress.
deepspeed_config (`DeepSpeedConfig`)
The path of ds_config
mpu
The mpu module for Row/Column parallelism
"""
compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
if hasattr(model, 'module'):
c_model = model.module
else:
c_model = model
# For layer reduction
if compress_methods[LAYER_REDUCTION][LAYER_REDUCTION_ENABLED]:
assert teacher_model is not None, "Teacher model is required for layer reduction"
student_initialization(c_model, teacher_model, deepspeed_config)
layer_added_compress_methods = get_compress_methods(c_model, compress_methods, mpu=mpu)
compression_preparation(c_model, layer_added_compress_methods, mpu)
# For sparse pruning snip_momentum method
shared_parameters = compress_methods[SPARSE_PRUNING][SHARED_PARAMETERS]
if shared_parameters[SPARSE_PRUNING_ENABLED] and \
shared_parameters[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
        assert nc is not None, "please ensure the neural_compressor Python package is installed via pip or conda if you want to use snip_momentum sparse pruning"
from .helper import generate_pruners, register_on_step_begin
        from neural_compressor import WeightPruningConfig
config = WeightPruningConfig(target_sparsity=1 - shared_parameters[SPARSE_PRUNING_DENSE_RATIO],
pattern=shared_parameters[SPARSE_PRUNING_BLOCK_PATTERN],
pruning_frequency=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE],
start_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET],
end_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_END],
excluded_op_names=shared_parameters[SPARSE_PRUNING_EXCLUDED_MODULES])
pruners = generate_pruners(config, c_model)
c_model.pruners = pruners
register_on_step_begin(c_model)
return model
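# Illustrative end-to-end sketch (hypothetical model/config, kept as comments): wrap the model
# with compression-aware modules before training and strip the redundancy afterwards.
#
# model = init_compression(model, "ds_config.json", mpu=None)
# ...train as usual (the compression scheduler toggles each technique at its schedule offset)...
# model = redundancy_clean(model, "ds_config.json", mpu=None)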
def redundancy_clean(model, deepspeed_config, mpu=None):
"""
Remove the redundancy of a model
Args:
model (`torch.nn.Module`)
The model to compress.
deepspeed_config (`DeepSpeedConfig`)
The path of ds_config
mpu
The mpu module for Row/Column parallelism
"""
compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
if hasattr(model, 'module'):
c_model = model.module
else:
c_model = model
layer_added_compress_methods_tmp = get_compress_methods(c_model, compress_methods, mpu=mpu)
# sort methods
order_list = [
WEIGHT_QUANTIZATION, SPARSE_PRUNING, ROW_PRUNING, HEAD_PRUNING, CHANNEL_PRUNING, ACTIVATION_QUANTIZATION
]
layer_added_compress_methods = sorted(layer_added_compress_methods_tmp,
key=lambda x: order_list.index(list(x[2].keys())[0]))
for module_name_lists, related_module_name_lists, compression_technique in layer_added_compress_methods:
stored_mask = []
need_mask = True if related_module_name_lists else False
for i, mnl in enumerate(module_name_lists):
for module_name in mnl:
mask = fix_compression(c_model, module_name, compression_technique, dim_reduction=need_mask)
if need_mask:
stored_mask.append(mask)
if need_mask:
for rmnl in related_module_name_lists[i]:
for j, module_name in enumerate(rmnl):
mask = fix_compression(c_model,
module_name,
compression_technique,
mask=stored_mask[j],
dim_reduction=True)
return model
def student_initialization(student_model, teacher_model, deepspeed_config):
'''
    Given a student model and a teacher model, initialize the selected student layers and modules from the teacher's weights.
    Args:
        student_model (`torch.nn.Module`)
            The model whose weights will be updated (the student).
        teacher_model (`torch.nn.Module`)
            The model that guides the student's initialization (the teacher).
deepspeed_config (`DeepSpeedConfig`)
The path of ds_config
'''
config = get_compression_config(check_deepspeed_config(deepspeed_config))
compress_methods = config[LAYER_REDUCTION]
module_name_prefix = compress_methods[MODULE_NAME_PREFIX]
teacher_layer = compress_methods[TEACHER_LAYER]
student_layer = [i for i in range(len(teacher_layer))]
other_module_name = compress_methods[OTHER_MODULE_NAME]
'''
    name_prefix (`str`)
        The prefix name before the layer number.
        Example 1: bert.encoder.layer, the prefix name for a BERT-base model
        Example 2: transformer.h, the prefix name for a Hugging Face GPT-2 model
    teacher_layer (`list of integers`)
        The teacher layers used for the student's re-initialization.
        Example 1: [1,3,5,7,9], meaning we match the 2nd/4th/6th/8th/10th teacher layers to the first 5 student layers
    student_layer (`list` or None)
        The student layers that need to be re-initialized.
        Example 1: None, meaning we reinitialize all the layers
        Example 2: [0,1,2,3,4], meaning we reinitialize the first 5 layers
    other_module_name (`list of string`)
        The other modules used for the student's re-initialization.
        Example 1: ['bert.pooler', 'bert.embeddings', 'classifier'], meaning we copy the weights of the teacher's embedding/pooler/classifier modules to the student
        Example 2: ['transformer.w', 'transformer.ln_f', 'lm_head'], meaning we copy the weights of the teacher's embedding-related modules to the student
        Note that teacher_layer should match student_layer in length.
'''
assert len(student_layer) == len(teacher_layer)
for s_name, t_name in zip(student_layer, teacher_layer):
s_module = recursive_getattr(student_model, module_name_prefix + '.' + str(s_name))
t_module = recursive_getattr(teacher_model, module_name_prefix + '.' + str(t_name))
for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
s_param.data.copy_(t_param.data)
for name in other_module_name:
s_module = recursive_getattr(student_model, name)
t_module = recursive_getattr(teacher_model, name)
for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
s_param.data.copy_(t_param.data)
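# Illustrative config sketch (hypothetical layer choices, kept as a comment) matching the
# docstring above: a "layer_reduction" block that initializes a 5-layer student from teacher
# layers 1/3/5/7/9 plus the embedding/pooler/classifier modules.
#
# "compression_training": {
#     "layer_reduction": {
#         "enabled": true,
#         "keep_number_layer": 5,
#         "module_name_prefix": "bert.encoder.layer",
#         "teacher_layer": [1, 3, 5, 7, 9],
#         "other_module_name": ["bert.pooler", "bert.embeddings", "classifier"]
#     }
# }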
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .basic_layer import Embedding_Compress, LinearLayer_Compress, Conv2dLayer_Compress, BNLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress
from .constants import *
from deepspeed.utils import logger
try:
from neural_compressor.compression import pruner as nc_pruner
except ImportError as e:
nc_pruner = None
def recursive_getattr(model, module_name):
"""
Recursively get the attribute of a module.
Args:
model (`torch.nn.Module`)
The model to get the attribute from.
module_name (`str`)
The name of the module to get the attribute from.
"""
split_list = module_name.split('.')
output = model
for name in split_list:
output = getattr(output, name)
return output
def recursive_setattr(model, module_name, module):
"""
Recursively set the attribute of a module.
Args:
model (`torch.nn.Module`)
The model to set the attribute in.
module_name (`str`)
The name of the module to set the attribute in.
module (`torch.nn.Module`)
The module to set the attribute to.
"""
split_list = module_name.split('.')
output = model
for name in split_list[:-1]:
output = getattr(output, name)
output.__setattr__(split_list[-1], module)
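# Minimal usage sketch (hypothetical module path, kept as a comment): fetch a nested sub-module
# by its dotted name and swap it for a replacement module.
#
# old_linear = recursive_getattr(model, "encoder.layer.0.output.dense")
# recursive_setattr(model, "encoder.layer.0.output.dense", new_linear)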
def module_replacement(model, module_name, compression_technique=None, mpu=None):
"""
Replace a module with a new module.
Args:
model (`torch.nn.Module`)
The model to replace the module in.
module_name (`str`)
The name of the module to replace.
        compression_technique (`dict`)
            The compression technique configuration to apply to the new module.
"""
# Get the old module
old_module = recursive_getattr(model, module_name)
need_bias = False
if hasattr(old_module, 'bias') and old_module.bias is not None:
need_bias = True
# Initialize the new module
if isinstance(old_module, LinearLayer_Compress) or isinstance(old_module, torch.nn.Linear):
if isinstance(old_module, LinearLayer_Compress):
new_module = old_module
else:
new_module = LinearLayer_Compress(old_module.in_features, old_module.out_features,
bias=need_bias).to(device=old_module.weight.device,
dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
elif isinstance(old_module, Conv2dLayer_Compress) or isinstance(old_module, torch.nn.Conv2d):
if isinstance(old_module, Conv2dLayer_Compress):
new_module = old_module
else:
new_module = Conv2dLayer_Compress(old_module.in_channels, old_module.out_channels, old_module.kernel_size, old_module.stride, old_module.padding, \
old_module.dilation, old_module.groups, need_bias, \
old_module.padding_mode).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
elif isinstance(old_module, torch.nn.BatchNorm2d):
new_module = BNLayer_Compress(old_module.num_features, old_module.eps, old_module.momentum, old_module.affine,
old_module.track_running_stats).to(old_module.weight.device,
old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
new_module.running_mean.data = old_module.running_mean.data
new_module.running_var.data = old_module.running_var.data
elif isinstance(old_module, Embedding_Compress) or isinstance(old_module, torch.nn.Embedding):
if isinstance(old_module, Embedding_Compress):
new_module = old_module
else:
new_module = Embedding_Compress(old_module.num_embeddings, old_module.embedding_dim, old_module.padding_idx, old_module.max_norm, old_module.norm_type, \
old_module.scale_grad_by_freq, old_module.sparse).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
elif mpu is not None and (isinstance(old_module, ColumnParallelLinear_Compress)
or isinstance(old_module, mpu.ColumnParallelLinear)):
if isinstance(old_module, ColumnParallelLinear_Compress):
new_module = old_module
else:
new_module = ColumnParallelLinear_Compress(mpu,
old_module.input_size,
old_module.output_size,
gather_output=old_module.gather_output,
skip_bias_add=old_module.skip_bias_add,
bias=need_bias).to(device=old_module.weight.device,
dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
elif mpu is not None and (isinstance(old_module, RowParallelLinear_Compress)
or isinstance(old_module, mpu.RowParallelLinear)):
if isinstance(old_module, RowParallelLinear_Compress):
new_module = old_module
else:
new_module = RowParallelLinear_Compress(mpu,
old_module.input_size,
old_module.output_size,
input_is_parallel=old_module.input_is_parallel,
skip_bias_add=old_module.skip_bias_add,
bias=need_bias).to(device=old_module.weight.device,
dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
else:
new_module = None
if compression_technique is not None:
for k, v in compression_technique.items():
if k == SPARSE_PRUNING:
if v[SPARSE_PRUNING_ENABLED]:
new_module.enable_sparse_pruning(v[SPARSE_PRUNING_DENSE_RATIO], v[SPARSE_PRUNING_METHOD])
elif k == ROW_PRUNING:
if v[ROW_PRUNING_ENABLED]:
new_module.enable_row_pruning(v[ROW_PRUNING_DENSE_RATIO], v[ROW_PRUNING_METHOD])
elif k == HEAD_PRUNING:
if v[HEAD_PRUNING_ENABLED]:
new_module.enable_head_pruning(v[HEAD_PRUNING_DENSE_RATIO], v[HEAD_PRUNING_METHOD],
v[HEAD_PRUNING_NUM_HEADS])
elif k == ACTIVATION_QUANTIZATION:
if v[ACTIVATION_QUANTIZATION_ENABLED]:
new_module.enable_activation_quantization(v[ACTIVATION_QUANTIZE_BITS], v[ACTIVATION_QUANTIZE_TYPE],
v[ACTIVATION_QUANTIZE_RANGE])
elif k == WEIGHT_QUANTIZATION:
if v[WEIGHT_QUANTIZE_ENABLED]:
new_module.enable_weight_quantization(v[WEIGHT_QUANTIZE_START_BITS],
v[WEIGHT_QUANTIZE_TARGET_BITS],
v[WEIGHT_QUANTIZATION_PERIOD],
v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED],
v[WEIGHT_QUANTIZE_TYPE], v[WEIGHT_QUANTIZE_GROUPS])
elif k == CHANNEL_PRUNING:
if v[CHANNEL_PRUNING_ENABLED]:
new_module.enable_channel_pruning(v[CHANNEL_PRUNING_DENSE_RATIO], v[CHANNEL_PRUNING_METHOD])
else:
raise NotImplementedError('Compression technique {} is not implemented'.format(k))
# Replace the old module with the new one
recursive_setattr(model, module_name, new_module)
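# Example (illustrative sketch): replacing a plain nn.Linear with its compressible
# wrapper and enabling in-forward weight quantization. The module name "classifier",
# the parameter values, and the "symmetric" quantization type are hypothetical; the
# dictionary keys are the constants imported by this module.
#
#   module_replacement(model, "classifier",
#                      compression_technique={
#                          WEIGHT_QUANTIZATION: {
#                              WEIGHT_QUANTIZE_ENABLED: True,
#                              WEIGHT_QUANTIZE_START_BITS: 8,
#                              WEIGHT_QUANTIZE_TARGET_BITS: 8,
#                              WEIGHT_QUANTIZATION_PERIOD: 1,
#                              WEIGHT_QUANTIZE_IN_FORWARD_ENABLED: True,
#                              WEIGHT_QUANTIZE_TYPE: "symmetric",
#                              WEIGHT_QUANTIZE_GROUPS: 1,
#                          }
#                      })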
def is_module_compressible(module, mpu=None):
ret = isinstance(module, torch.nn.Linear) or \
isinstance(module, torch.nn.Conv2d) or \
isinstance(module, torch.nn.Embedding) or \
isinstance(module, torch.nn.BatchNorm2d)
if mpu is not None:
ret = ret or isinstance(module, mpu.RowParallelLinear) or isinstance(module, mpu.ColumnParallelLinear)
return ret
def compression_preparation(model, compression_technique_list, mpu):
"""
Prepare the compression techniques of a model.
Args:
model (`torch.nn.Module`)
The model to prepare the compression techniques of.
compression_technique_list (`list`)
The list of compression techniques to apply to the model.
"""
# Here we first replace every compressible module with its compression wrapper
for module_name, module in model.named_modules():
if is_module_compressible(module, mpu):
module_replacement(model, module_name, mpu=mpu)
for module_name_lists, _, compression_technique in compression_technique_list:
for mnl in module_name_lists:
for module_name in mnl:
module_replacement(model, module_name, compression_technique)
return model
def fix_compression(model, module_name, compression_technique, mask=None, dim_reduction=False):
"""
Fix the compression technique of a module.
Args:
model (`torch.nn.Module`)
The model to fix the compression technique of.
module_name (`str`)
The name of the module to fix the compression technique of.
compression_technique (`str`)
The compression technique to fix the module to.
"""
# Here we can make things much simpler by just replacing the module
module = recursive_getattr(model, module_name)
for k, v in compression_technique.items():
if k == WEIGHT_QUANTIZATION and v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] and v[WEIGHT_QUANTIZE_ENABLED]:
return module.fix_weight_quantization()
elif k == SPARSE_PRUNING and v[SPARSE_PRUNING_ENABLED]:
return module.fix_sparse_pruning_helper()
elif k == ROW_PRUNING and (v[ROW_PRUNING_ENABLED] or mask is not None):
return module.fix_row_col_pruning_helper(mask, dim_reduction=dim_reduction)
elif k == HEAD_PRUNING and (v[HEAD_PRUNING_ENABLED] or mask is not None):
return module.fix_head_pruning_helper(mask, v[HEAD_PRUNING_NUM_HEADS], dim_reduction=dim_reduction)
elif k == CHANNEL_PRUNING and (v[CHANNEL_PRUNING_ENABLED] or mask is not None):
return module.fix_channel_pruning_helper(mask, dim_reduction=dim_reduction)
def convert_conv1d_to_linear(model, convert_type):
'''
This is a helper function to convert Conv1D modules to Linear (e.g., to convert GPT2 from HF)
'''
if hasattr(model, 'module'):
c_model = model.module
else:
c_model = model
for name, module in c_model.named_modules():
if isinstance(module, convert_type):
old_module = recursive_getattr(c_model, name)
new_module = torch.nn.Linear(old_module.weight.data.size(0),
old_module.weight.data.size(1),
bias=True if old_module.bias is not None else False)
new_module.weight.data = old_module.weight.data.t().contiguous()
if new_module.bias is not None:
new_module.bias.data = old_module.bias.data.view(-1)
recursive_setattr(c_model, name, new_module)
return model
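# Example (illustrative sketch): converting the Conv1D projections of a Hugging Face
# GPT-2 model into regular nn.Linear layers. The transformers import path for Conv1D
# is an assumption and may differ across transformers versions.
#
#   from transformers import GPT2LMHeadModel
#   from transformers.pytorch_utils import Conv1D
#   model = GPT2LMHeadModel.from_pretrained("gpt2")
#   model = convert_conv1d_to_linear(model, Conv1D)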
def generate_pruners(config, model):
"""Generate pruners.
Args:
config (`neural_compressor.WeightPruningConfig`)
The WeightPruningConfig object describing the pruning setup.
model (`torch.nn.module`)
The torch module object to be pruned.
"""
assert nc_pruner is not None, "please ensure the neural_compressor python package is installed via pip or conda if you want to use snip_momentum sparse pruning"
from nc_pruner.utils import process_config, parse_to_prune
from nc_pruner.pruners import get_pruner
assert isinstance(model, torch.nn.Module)
pruners_info = process_config(config)
pruners = []
for info in pruners_info:
modules = parse_to_prune(info, model)
if modules == {}:
logger.warning("one pruner hooks no layers, please have a check")
pruners.append(get_pruner(info, modules))
info['modules'] = [key for key in modules.keys()]
info['len_of_modules'] = len(info['modules'])
logger.info(info)
return pruners
def register_on_step_begin(model):
"""Mount on_step_begin to the model.
Args:
model (`torch.nn.module`)
The torch module object to be pruned.
"""
def hook(module, input):
for pruner in module.pruners:
pruner.on_step_begin(0)
hook_handle = model.register_forward_pre_hook(hook)
return hook_handle
def rewrite_optimizer_step(opt: torch.optim.Optimizer):
"""Mount on_before/after_optimizer_step to the optimizer.
Args:
opt (`torch.optim.Optimizer`)
The torch optimizer object to be hooked.
"""
def new_step(self, closure=None):
if hasattr(self, "pruners"):
for pruner in self.pruners:
pruner.on_before_optimizer_step()
if closure is not None:
res = self.orig_step(closure)
else:
res = self.orig_step()
if hasattr(self, "pruners"):
for pruner in self.pruners:
pruner.on_after_optimizer_step()
return res
opt.orig_step = opt.step
import types
opt.step = types.MethodType(new_step, opt)
return opt
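# Example (illustrative sketch): wiring the neural-compressor pruners into a training
# setup. `config` is assumed to be a neural_compressor.WeightPruningConfig; attaching
# the pruners to the model/optimizer is required because the hooks above read
# `module.pruners` and `self.pruners`.
#
#   pruners = generate_pruners(config, model)
#   model.pruners = pruners
#   register_on_step_begin(model)
#   optimizer.pruners = pruners
#   optimizer = rewrite_optimizer_step(optimizer)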
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .compress import get_module_name
from .constants import *
from .helper import recursive_getattr
from deepspeed.utils import logger
class compression_scheduler():
'''
Used to schedule different compression methods
'''
def __init__(self, model, compression_config):
self.model = model
self.compression_config = compression_config
self.make_init()
self.training_steps = 0
self.weight_quantization_enabled = False
self.verbose = {
WEIGHT_QUANTIZATION: False,
ACTIVATION_QUANTIZATION: False,
SPARSE_PRUNING: False,
HEAD_PRUNING: False,
ROW_PRUNING: False,
CHANNEL_PRUNING: False
}
def make_init(self):
self.different_compression_methods = {}
for method, method_content in self.compression_config.items():
if LAYER_REDUCTION in method:
continue
self.different_compression_methods[method] = {
TECHNIQUE_ENABLED: False,
SHARED_PARAMETERS: None,
DIFFERENT_GROUPS: []
}
exist_module_name = set()
shared_parameters = method_content[SHARED_PARAMETERS]
self.different_compression_methods[method][TECHNIQUE_ENABLED] = shared_parameters[TECHNIQUE_ENABLED]
self.different_compression_methods[method][SHARED_PARAMETERS] = shared_parameters
for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
module_name_list = []
for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
module_name, exist_module_name = get_module_name(group_name,
self.model,
key_word,
exist_module_name,
verbose=False)
module_name_list.extend(module_name)
if module_name_list:
self.different_compression_methods[method][DIFFERENT_GROUPS].append(
[group_name, module_name_list,
method_parameters.copy().pop('params')])
def check_weight_quantization(self):
# check weight quantization
wq = self.different_compression_methods[WEIGHT_QUANTIZATION]
if not wq[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = wq[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in wq[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.weight_quantization_enabled = True
if not self.verbose[WEIGHT_QUANTIZATION]:
logger.info(f'Weight quantization is enabled at step {self.training_steps}')
self.weight_quantization_enabled = True
self.verbose[WEIGHT_QUANTIZATION] = True
def check_activation_quantization(self):
# check activation quantization
aq = self.different_compression_methods[ACTIVATION_QUANTIZATION]
if not aq[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = aq[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in aq[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.activation_quantization_enabled = True
if not self.verbose[ACTIVATION_QUANTIZATION]:
logger.info(f'Activation quantization is enabled at step {self.training_steps}')
self.verbose[ACTIVATION_QUANTIZATION] = True
def check_sparse_pruning(self):
# check sparse pruning
sp = self.different_compression_methods[SPARSE_PRUNING]
if not sp[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = sp[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[
TECHNIQUE_SCHEDULE_OFFSET] and self.training_steps <= shared_parameters[
TECHNIQUE_SCHEDULE_OFFSET_END]:
for group_name, module_name_list, method_parameters in sp[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.sparse_pruning_enabled = True
if not self.verbose[SPARSE_PRUNING]:
logger.info(f'Sparse pruning is enabled at step {self.training_steps}')
self.verbose[SPARSE_PRUNING] = True
def check_head_pruning(self):
# check head pruning
hp = self.different_compression_methods[HEAD_PRUNING]
if not hp[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = hp[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in hp[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.head_pruning_enabled = True
if not self.verbose[HEAD_PRUNING]:
logger.info(f'Head pruning is enabled at step {self.training_steps}')
self.verbose[HEAD_PRUNING] = True
def check_row_pruning(self):
# check row pruning
rp = self.different_compression_methods[ROW_PRUNING]
if not rp[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = rp[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in rp[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.row_pruning_enabled = True
if not self.verbose[ROW_PRUNING]:
logger.info(f'Row pruning is enabled at step {self.training_steps}')
self.verbose[ROW_PRUNING] = True
def check_channel_pruning(self):
# check channel pruning
cp = self.different_compression_methods[CHANNEL_PRUNING]
if not cp[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = cp[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in cp[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.channel_pruning_enabled = True
if not self.verbose[CHANNEL_PRUNING]:
logger.info(f'Channel pruning is enabled at step {self.training_steps}')
self.verbose[CHANNEL_PRUNING] = True
def check_all_modules(self):
# check all different compression methods we have
self.check_weight_quantization()
self.check_activation_quantization()
self.check_sparse_pruning()
self.check_head_pruning()
self.check_row_pruning()
self.check_channel_pruning()
def step(self, step_zero_check=False):
if not step_zero_check:
self.training_steps += 1
self.check_all_modules()
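# Example (illustrative sketch): driving the scheduler from a training loop so that
# each technique switches on once its schedule offset is reached. The data loader,
# loss computation, and optimizer are placeholders.
#
#   scheduler = compression_scheduler(model, compression_config)
#   scheduler.step(step_zero_check=True)   # apply anything scheduled for step 0
#   for batch in data_loader:
#       loss = model(batch)
#       loss.backward()
#       optimizer.step()
#       scheduler.step()                   # advances training_steps and re-checks offsets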
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig
from pydantic import Field
from pydantic import validator
from typing import Dict, Union
from enum import Enum
class DtypeEnum(Enum):
# The torch dtype must always be the first value (so we return torch.dtype)
fp16 = torch.float16, "torch.float16", "fp16", "float16", "half"
fp32 = torch.float32, "torch.float32", "fp32", "float32", "float"
bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16", "bfloat"
int8 = torch.int8, "torch.int8", "int8"
# Copied from https://stackoverflow.com/a/43210118
# Allows us to use multiple values for each Enum index and returns first
# listed value when Enum is called
def __new__(cls, *values):
obj = object.__new__(cls)
# first value is canonical value
obj._value_ = values[0]
for other_value in values[1:]:
cls._value2member_map_[other_value] = obj
obj._all_values = values
return obj
def __repr__(self):
return "<%s.%s: %s>" % (
self.__class__.__name__,
self._name_,
", ".join([repr(v) for v in self._all_values]),
)
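# Example (illustrative sketch): every alias listed for a member resolves to the same
# enum member, and the canonical value is always the torch dtype.
#
#   assert DtypeEnum("fp16") is DtypeEnum(torch.float16) is DtypeEnum("half")
#   assert DtypeEnum("fp16").value == torch.float16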
class MoETypeEnum(str, Enum):
residual = "residual"
standard = "standard"
class DeepSpeedTPConfig(DeepSpeedConfigModel):
""" Configure tensor parallelism settings """
enabled: bool = True
""" Turn tensor parallelism on/off. """
tp_size: int = 1
""" Number of devices to split the model across using tensor parallelism. """
mpu: object = None
"""
A model parallelism unit object that implements
``get_{model,data}_parallel_{rank,group,world_size}()``.
"""
tp_group: object = None
class DeepSpeedMoEConfig(DeepSpeedConfigModel):
""" Sets parameters for MoE """
enabled: bool = True
ep_size: int = 1
"""
The expert-parallelism size which is used for partitioning the experts
across the GPUs in the expert-parallel group.
"""
moe_experts: list = Field([1], alias="num_experts")
""" The global number of experts used in an MoE layer. """
type: MoETypeEnum = MoETypeEnum.standard
"""
Specify the type of MoE layer. We have two types of MoE layer: 'Standard'
and 'Residual'.
"""
ep_mp_group: object = None
ep_group: object = Field(None, alias="expert_group")
class QuantTypeEnum(str, Enum):
asym = "asymmetric"
sym = "symmetric"
class BaseQuantConfig(DeepSpeedConfigModel):
enabled = True
num_bits = 8
q_type: QuantTypeEnum = QuantTypeEnum.sym
q_groups: int = 1
class WeightQuantConfig(BaseQuantConfig):
enabled = True
class ActivationQuantConfig(BaseQuantConfig):
enabled = True
class QKVQuantConfig(DeepSpeedConfigModel):
enabled = True
class QuantizationConfig(DeepSpeedConfigModel):
enabled: bool = True
activation: ActivationQuantConfig = ActivationQuantConfig()
weight: WeightQuantConfig = WeightQuantConfig()
qkv: QKVQuantConfig = QKVQuantConfig()
# todo: brainstorm on how to do ckpt loading for DS inference
class InferenceCheckpointConfig(DeepSpeedConfigModel):
checkpoint_dir: str = None
save_mp_checkpoint_path: str = None
base_dir: str = None
class DeepSpeedInferenceConfig(DeepSpeedConfigModel):
""" Sets parameters for DeepSpeed Inference Engine. """
replace_with_kernel_inject: bool = Field(False, alias="kernel_inject")
"""
Set to true to inject inference kernels for models such as Bert, GPT2,
GPT-Neo and GPT-J. Otherwise, the injection_dict provides the names of two
linear layers as a tuple:
`(attention_output projection, transformer output projection)`
"""
dtype: DtypeEnum = torch.float16
"""
Desired model data type; the model will be converted to this type.
Supported target types: `torch.half`, `torch.bfloat16`, `torch.int8`, `torch.float`
"""
tensor_parallel: DeepSpeedTPConfig = Field({}, alias="tp")
"""
Configuration for tensor parallelism used to split the model across several
GPUs. Expects a dictionary containing values for :any:`DeepSpeedTPConfig`.
"""
enable_cuda_graph: bool = False
"""
Use this flag for capturing the CUDA-Graph of the inference ops, so that it
can run faster using the graph replay method.
"""
zero: DeepSpeedZeroConfig = {}
"""
ZeRO configuration to use with the Inference Engine. Expects a dictionary
containing values for :any:`DeepSpeedZeroConfig`.
"""
triangular_masking: bool = Field(True, alias="tm")
"""
Controls the type of masking for attention scores in transformer layer.
Note that the masking is application specific.
"""
moe: Union[bool, DeepSpeedMoEConfig] = {}
"""
Specify if the type of Transformer is MoE. Expects a dictionary containing
values for :any:`DeepSpeedMoEConfig`.
"""
quant: QuantizationConfig = {}
"""
NOTE: only works for int8 dtype.
Quantization settings used for quantizing your model using MoQ. The
setting can be one element or a tuple. If one value is passed in, we
treat it as the number of groups used in quantization. A tuple is passed
in when there is extra grouping for the MLP part of a
Transformer layer (e.g. (True, 8) means we quantize the model using 8
groups for the whole network except the MLP part, which uses 8 extra
groups). Expects a dictionary containing values for
:any:`QuantizationConfig`.
"""
#todo: refactor the following 3 into the new checkpoint_config
checkpoint: str = None
"""
Path to deepspeed compatible checkpoint or path to JSON with load policy.
"""
base_dir: str = None
"""
This shows the root directory under which all the checkpoint files exist.
This can be passed through the json config too.
"""
set_empty_params: bool = False
"""
Specify whether the inference module is created with empty or real tensors.
"""
save_mp_checkpoint_path: str = None
"""
The path for which we want to save the loaded model with a checkpoint. This
feature is used for adjusting the parallelism degree to help alleviate the
model loading overhead. It does not save any new checkpoint if no path is
passed.
"""
checkpoint_config: InferenceCheckpointConfig = Field({}, alias="ckpt_config")
"""
TODO: Add docs. Expects a dictionary containing values for
:any:`InferenceCheckpointConfig`.
"""
return_tuple: bool = True
"""
Specify whether or not the transformer layers need to return a tuple or a
Tensor.
"""
training_mp_size: int = 1
"""
If loading a checkpoint, this is the mp size that it was trained with; it
may be different from the mp size that you want to use during
inference.
"""
replace_method: str = Field(
"auto",
deprecated=True,
deprecated_msg="This parameter is no longer needed, please remove from your call to DeepSpeed-inference")
injection_policy: Dict = Field(None, alias="injection_dict")
"""
Dictionary mapping a client nn.Module to its corresponding injection
policy. e.g., `{BertLayer : deepspeed.inference.HFBertLayerPolicy}`
"""
injection_policy_tuple: tuple = None
""" TODO: Add docs """
config: Dict = Field(None, alias="args") # todo: really no need for this field if we can refactor
max_out_tokens: int = Field(1024, alias="max_tokens")
"""
This argument shows the maximum number of tokens the inference engine can
work with, including the input and output tokens. Please consider
increasing it to the token length required for your use case.
"""
min_out_tokens: int = Field(1, alias="min_tokens")
"""
This argument communicates to the runtime the minimum number of tokens you
expect you will need to generate. This will cause the runtime to error
if it is unable to provide this, and to report the memory pressure
rather than seg-faulting or providing corrupted output.
"""
transposed_mode: bool = Field(False, alias="transposed_mode")
mp_size: int = Field(1, deprecated=True, new_param="tensor_parallel.tp_size")
"""
Desired model parallel size, default is 1 meaning no model parallelism.
Deprecated, please use the ``tensor_parallel`` config to control model
parallelism.
"""
mpu: object = Field(None, deprecated=True, new_param="tensor_parallel.mpu")
ep_size: int = Field(1, deprecated=True, new_param="moe.ep_size")
ep_group: object = Field(None, alias="expert_group", deprecated=True, new_param="moe.ep_group")
ep_mp_group: object = Field(None, alias="expert_mp_group", deprecated=True, new_param="moe.ep_mp_group")
moe_experts: list = Field([1], deprecated=True, new_param="moe.moe_experts")
moe_type: MoETypeEnum = Field(MoETypeEnum.standard, deprecated=True, new_param="moe.type")
@validator("moe")
def moe_backward_compat(cls, field_value, values):
if isinstance(field_value, bool):
return DeepSpeedMoEConfig(moe=field_value)
return field_value
class Config:
# Get the str representation of the datatype for serialization
json_encoders = {torch.dtype: lambda x: str(x)}
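# Example (illustrative sketch): building the config from a plain dictionary using the
# field aliases declared above ("kernel_inject" and "tp"); the values are placeholders.
#
#   config = DeepSpeedInferenceConfig(**{"kernel_inject": True, "tp": {"tp_size": 2}})
#   assert config.replace_with_kernel_inject and config.tensor_parallel.tp_size == 2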
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .engine import InferenceEngine
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import time
import os
from deepspeed import comm as dist
from deepspeed.utils.logging import log_dist
from torch.nn.modules import Module
from packaging import version as pkg_version
from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine
from deepspeed.utils.timer import SynchronizedWallClockTimer
from ..runtime.state_dict_factory import SDLoaderFactory
from ..runtime.weight_quantizer import WeightQuantization
from ..module_inject import replace_transformer_layer, generic_injection
from ..comm.comm import init_distributed
from ..pipe import PipelineModule
from ..moe.utils import has_moe_layers
from ..module_inject import LinearAllreduce, LinearLayer, Normalize, ReplaceWithTensorSlicing
from deepspeed.accelerator import get_accelerator
from ..module_inject.policy import TransformerPolicy
from ..module_inject.auto_tp import AutoTP
from ..module_inject.replace_policy import generic_policies
DS_INFERENCE_ENABLED = False
from torch import nn
INFERENCE_MODEL_TIMER = "model-forward-inference"
def build_bloom_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
"""
Link to paper: https://arxiv.org/abs/2108.12409. The Alibi tensor is not causal as the original paper mentions; it
relies on a translation invariance of softmax for a quick implementation: with l being a tensor and a being a fixed value,
`softmax(l+a) = softmax(l)`. Based on
https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly.
Args:
attention_mask (`torch.Tensor`):
Token-wise attention mask, this should be of shape (batch_size, max_seq_len).
num_heads (`int`, *required*):
number of heads
dtype (`torch.dtype`):
dtype of the output tensor
Returns:
Tensor shaped (batch_size * num_heads, 1, max_seq_len)
"""
import math
batch_size, seq_length = attention_mask.shape
closest_power_of_2 = 2**math.floor(math.log2(num_heads))
base = torch.tensor(2**(-(2**-(math.log2(closest_power_of_2) - 3))),
device=attention_mask.device,
dtype=torch.float32)
powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
slopes = torch.pow(base, powers)
if closest_power_of_2 != num_heads:
extra_base = torch.tensor(2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))),
device=attention_mask.device,
dtype=torch.float32)
num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
# Note: alibi will be added to the attention bias that will be applied to the query, key product of attention
# => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
# => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
# => the query_length dimension will then be broadcasted correctly
# This is more or less identical to T5's relative position bias:
# https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
alibi = slopes[..., None] * arange_tensor
if dist.is_initialized():
num_heads_per_rank = int(num_heads / dist.get_world_size())
offset = dist.get_rank() * num_heads_per_rank
alibi = alibi.view(batch_size, num_heads, 1, seq_length)
alibi = alibi[:, offset:num_heads_per_rank + offset, :, :]
return alibi.reshape(batch_size * num_heads_per_rank, 1, seq_length).to(dtype)
else:
return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
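# Example (illustrative sketch): without an initialized distributed group the returned
# alibi bias has shape (batch_size * num_heads, 1, seq_length).
#
#   mask = torch.ones(2, 16, dtype=torch.int64)   # batch_size=2, seq_length=16
#   alibi = build_bloom_alibi_tensor(mask, num_heads=8, dtype=torch.float16)
#   assert alibi.shape == (2 * 8, 1, 16)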
class InferenceEngine(Module):
inference_mp_group = None
inference_ep_group = None
expert_mp_group = None
def __init__(self, model, config):
"""
Args:
model: torch.nn.Module
config: DeepSpeedInferenceConfig
"""
global DS_INFERENCE_ENABLED
DS_INFERENCE_ENABLED = True
super().__init__()
self.module = model
self._config = config
self._get_model_config_generate(config) # keep for weird backward compatibility
# patch model generate with ours if model uses it
if hasattr(self.module, "generate"):
self.generate = self._generate
if hasattr(self.module, "config"):
TransformerPolicy.hf_model_config = self.module.config
# todo: keep this self.injection_dict because we don't want to change the config.injection_policy API
# todo: this will get changed when Molly's PR on auto injection dict is merged
self.injection_dict = config.injection_policy
# todo: refactor the mp_group and mp_size related in the next refactor
self.mp_group = config.tensor_parallel.tp_group
self.mpu = config.tensor_parallel.mpu
#self._validate_args(self.mpu, config.replace_with_kernel_inject)
self.quantize_merge_count = 1
self.quantization_scales = None
# these are not needed in the config as we are creating them ourselves in the inference engine
self.ep_group = None # config.moe.ep_group
self.expert_mp_group = None # config.moe.ep_mp_group
self.cuda_graph_created = False
self.checkpoint_engine = TorchCheckpointEngine()
quantization_setting = None
self._init_quantization_setting(
quantization_setting) # todo: update with the new quant config for weight quant
self.model_profile_enabled = False
self._model_times = []
if not self.injection_dict and config.replace_with_kernel_inject:
# This is a hack to remove the prepare_mask function on HF side for BLOOM architecture
self.remove_mask_prepare_for_bloom()
if self.injection_dict or not config.replace_with_kernel_inject:
# This is a hack to redefine the alibi func due to TP
if config.tensor_parallel.tp_size > 1:
self.build_alibi_tensor()
if get_accelerator().device_name() == 'cuda' and config.enable_cuda_graph:
assert pkg_version.parse(torch.__version__) >= pkg_version.parse("1.10"), \
"If you want to use cuda graph, please upgrade torch to at least v1.10"
# Check if model passed to engine is loaded w/ meta tensors, in which case
# kernel injection must be enabled.
# NOTE: This check assumes a Hugging Face hierarchy for the device type i.e. module.device.type
self.model_meta_device = self.module.device.type == 'meta' if hasattr(self.module, "device") else False
# convert model to intended dtype
if config.dtype:
self._convert_to_dtype(config)
if self.mpu:
config.tensor_parallel.tp_size = dist.get_world_size(group=self.mpu.get_model_parallel_group())
self.mp_group = self.mpu.get_model_parallel_group()
elif config.tensor_parallel.tp_size > 1:
self._create_model_parallel_group(config)
config.tensor_parallel.tp_group = self.mp_group
if isinstance(self.module, torch.nn.Module):
moe, _ = has_moe_layers(self.module)
else:
moe = False
if moe and dist.get_world_size() > 1:
self._create_ep_parallel_group(config.moe.moe_experts)
# We only support three modes: 1) user specified policy for tensor-parallelism, 2) kernel injection (replace_with_kernel_inject), and 3) automatic tensor parallelism if tp_size > 1.
if self.injection_dict:
# 1. User specified Tensor Parallelism
assert not config.replace_with_kernel_inject, "Cannot use both user specified injection policy and kernel injection"
for client_module, injection_policy in self.injection_dict.items():
# construct the tuple and pass that instead of a string or dict.
if isinstance(injection_policy, str):
config.injection_policy_tuple = (injection_policy, )
else:
config.injection_policy_tuple = injection_policy
self._apply_injection_policy(config, client_module)
else:
if config.replace_with_kernel_inject:
# 2. DeepSpeed Kernel Injection
self._apply_injection_policy(config)
elif config.tensor_parallel.tp_size > 1:
# 3. Automatic Tensor Parallelism
parser_dict = AutoTP.tp_parser(model)
print("AutoTP: ", parser_dict)
for client_module, injection_policy in parser_dict:
if isinstance(injection_policy, str):
config.injection_policy_tuple = (injection_policy, )
else:
config.injection_policy_tuple = injection_policy
self._apply_injection_policy(config, client_module)
device = get_accelerator().current_device_name()
self.module.to(device)
if config.tensor_parallel.tp_size > 1:
_rng_state = get_accelerator().get_rng_state().to(get_accelerator().current_device_name())
dist.broadcast(_rng_state, 0)
get_accelerator().set_rng_state(_rng_state.cpu())
if config.tensor_parallel.tp_size > 1:
assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism"
# Check if local CUDA graphs can be created in replacement modules
self.local_cuda_graph = self._local_cuda_graph_used(self.module)
def profile_model_time(self, use_cuda_events=True):
if not self.model_profile_enabled and not self._config.enable_cuda_graph:
self.module.register_forward_pre_hook(self._pre_forward_hook)
self.module.register_forward_hook(self._post_forward_hook)
self.model_profile_enabled = True
self.use_cuda_events = use_cuda_events
if self.use_cuda_events:
self.timers = SynchronizedWallClockTimer()
# todo: remove this once all the config dicts are centralized from top level pydantic config
def _get_model_config_generate(self, config):
# this is being passed to replace_transformer_layer(config=self.user_model_config_dict)
self.config = getattr(self.module, 'config', None) if config.config is None else config.config
def remove_mask_prepare_for_bloom(self):
if hasattr(self.module, 'transformer'):
if hasattr(self.module.transformer, '_prepare_attn_mask'):
self.module.transformer._prepare_attn_mask = lambda attention_mask, *args, **kwargs: attention_mask
def build_alibi_tensor(self):
if hasattr(self.module, 'transformer'):
if hasattr(self.module.transformer, 'build_alibi_tensor'):
self.module.transformer.build_alibi_tensor = build_bloom_alibi_tensor
def _pre_forward_hook(self, module, *inputs, **kwargs):
if self.use_cuda_events:
self.timers(INFERENCE_MODEL_TIMER).start()
else:
get_accelerator().synchronize()
self._start = time.time()
def _post_forward_hook(self, module, input, output):
if self.use_cuda_events:
self.timers(INFERENCE_MODEL_TIMER).stop()
elapsed_time = self.timers(INFERENCE_MODEL_TIMER).elapsed(reset=True)
else:
get_accelerator().synchronize()
self._end = time.time()
elapsed_time = self._end - self._start
self._model_times.append(elapsed_time)
def _create_model_parallel_group(self, config):
# Call the init process
if InferenceEngine.inference_mp_group is None:
init_distributed()
local_rank = int(os.getenv('LOCAL_RANK', '0'))
get_accelerator().set_device(local_rank)
ranks = [i for i in range(config.tensor_parallel.tp_size)]
self.mp_group = dist.new_group(ranks)
InferenceEngine.inference_mp_group = self.mp_group
else:
self.mp_group = InferenceEngine.inference_mp_group
def _create_ep_parallel_group(self, moe_experts):
# Call the init process
self.ep_group = {}
self.expert_mp_group = {}
moe_experts = moe_experts if type(moe_experts) is list else [moe_experts]
for e in moe_experts:
self.ep_group.update({e: None})
self.expert_mp_group.update({e: None})
for moe_ep_size in self.ep_group.keys():
num_ep_groups = dist.get_world_size() // moe_ep_size
for i in range(num_ep_groups):
ep_cnt = i * moe_ep_size
size = dist.get_world_size() if moe_ep_size > dist.get_world_size() else moe_ep_size
ranks = list(range(ep_cnt, ep_cnt + size))
_ep_group = dist.new_group(ranks)
if dist.get_rank() in ranks:
self.ep_group.update({moe_ep_size: _ep_group})
if dist.get_world_size() > moe_ep_size:
num_expert_mp_groups = dist.get_world_size() // num_ep_groups
expert_mp_size = dist.get_world_size() // moe_ep_size
for i in range(num_expert_mp_groups):
expert_mp_comm_ranks = [i + nr * moe_ep_size for nr in range(expert_mp_size)]
_expert_mp_group = dist.new_group(expert_mp_comm_ranks)
if dist.get_rank() in expert_mp_comm_ranks:
self.expert_mp_group.update({moe_ep_size: _expert_mp_group})
def _init_quantization_setting(self, quantization_setting):
self.quantize_bits = 8
self.mlp_extra_grouping = False
self.quantize_groups = 1
if type(quantization_setting) is tuple:
self.mlp_extra_grouping, \
self.quantize_groups = quantization_setting
elif quantization_setting is not None:
self.quantize_groups = quantization_setting
log_dist(
f"quantize_bits = {self.quantize_bits} "
f"mlp_extra_grouping = {self.mlp_extra_grouping}, "
f"quantize_groups = {self.quantize_groups}", [0])
# TODO: remove this function and add this functionality to pydantic config checking
def _validate_args(self, mpu, replace_with_kernel_inject):
# TODO: to support SD pipeline we need to avoid this check for now
if replace_with_kernel_inject and not isinstance(self.module, Module):
raise ValueError(f"model must be a torch.nn.Module, got {type(self.module)}")
if not isinstance(self._config.tensor_parallel.tp_size, int) or self._config.tensor_parallel.tp_size < 1:
raise ValueError(f"mp_size must be an int >= 1, got {self._config.tensor_parallel.tp_size}")
if mpu:
methods = ["get_model_parallel_group", "get_data_parallel_group"]
for method in methods:
if not hasattr(mpu, method):
raise ValueError(f"mpu is missing {method}")
if self._config.checkpoint is not None and not isinstance(self._config.checkpoint, (str, dict)):
raise ValueError(f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}")
supported_dtypes = [None, torch.half, torch.int8, torch.float]
if self._config.dtype not in supported_dtypes:
raise ValueError(f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}")
if self.injection_dict is not None and not isinstance(self.injection_dict, dict):
raise ValueError(f"injection_dict must be None or a dict, got: {self.injection_dict}")
def load_model_with_checkpoint(self, r_module):
self.mp_replace = ReplaceWithTensorSlicing(
mp_group=self.mp_group, mp_size=self._config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1)
error_msgs = []
def load(module, state_dict, prefix):
args = (state_dict, prefix, {}, True, [], [], error_msgs)
if hasattr(module, 'weight'):
if module.weight.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data,
device="cpu"),
requires_grad=module.weight.data.requires_grad)
if 'query_key_value' in prefix:
module.weight = self.mp_replace.strided_copy(module.weight.data,
state_dict[prefix + 'weight'],
num_splits=3)
else:
module.weight = self.mp_replace.copy(module.weight.data, state_dict[prefix + 'weight'])
else:
if module.norm.weight.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.norm.weight = torch.nn.parameter.Parameter(
data=torch.empty_like(module.norm.weight.data, device="cpu"),
requires_grad=module.norm.weight.data.requires_grad)
module.norm.weight = self.mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight'])
if prefix + 'bias' in self.key_list:
if hasattr(module, 'norm'):
if module.norm.bias.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.norm.bias = torch.nn.parameter.Parameter(
data=torch.empty_like(module.norm.bias.data, device="cpu"),
requires_grad=module.norm.bias.data.requires_grad)
module.norm.bias = self.mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias'])
else:
if module.bias.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data,
device="cpu"),
requires_grad=module.bias.data.requires_grad)
data = state_dict[prefix + 'bias']
data = data.to(get_accelerator().current_device_name())
module.bias = self.mp_replace.copy(module.bias, data)
layer_policies = {
nn.Linear: load,
nn.Embedding: load,
nn.LayerNorm: load,
LinearLayer: load,
LinearAllreduce: load
}
def load_module_recursive(module, prefix='', level=0):
for name, child in module.named_children():
if child.__class__ in layer_policies:
checking_key = prefix + name + '.'
if not any(checking_key in item for item in self.key_list):
continue
if len(list(child.parameters())) > 0 and list(child.parameters())[0].numel() == 0:
if len(child.weight.ds_shape) == 1:
child = Normalize(dim=child.weight.ds_shape[-1], dtype=child.weight.dtype, eps=child.eps)
setattr(module, name, child)
load(child, self.sd, prefix + name + '.')
else:
load_module_recursive(child, prefix if level == 0 else prefix + name + '.', level + 1)
load_module_recursive(r_module)
embedding_weight = None
for n, p in r_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
embedding_weight = p
if embedding_weight is not None and hasattr(r_module, "lm_head") and hasattr(
r_module.lm_head, "weight") and r_module.lm_head.weight.is_meta:
r_module.lm_head.weight = embedding_weight
def _apply_injection_policy(self, config, client_module=None):
# client_module is only passed when using the injection_dict method.
checkpoint_dir = config.checkpoint
checkpoint = SDLoaderFactory.get_sd_loader_json(checkpoint_dir,
self.checkpoint_engine) if checkpoint_dir is not None else None
generic_injection(self.module,
fp16=(config.dtype == torch.half) or (config.dtype == torch.int8),
bf16=(config.dtype == torch.bfloat16),
enable_cuda_graph=config.enable_cuda_graph)
if isinstance(self.module, torch.nn.Module):
# config is our DeepSpeedInferenceConfig and self.config is the HF model config
replace_transformer_layer(client_module, self.module, checkpoint, config, self.config)
def _get_all_ckpt_names(self, checkpoints_path, tag):
ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, tag, mp_placeholder="*")
import glob
ckpt_files = glob.glob(ckpt_file_pattern)
ckpt_files.sort()
return ckpt_files
def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None):
if mp_placeholder is not None:
mp_rank_str = mp_placeholder
else:
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
mp_rank_str = "{:02d}".format(mp_rank)
ckpt_name = os.path.join(
checkpoints_path,
"mp_rank_" + mp_rank_str + "_model_states.pt",
)
return ckpt_name
def _load_checkpoint(self, load_dir, load_module_strict=True, tag=None):
is_pipe_parallel = isinstance(self.module, PipelineModule)
if is_pipe_parallel:
raise RuntimeError('pipeline parallelism is currently not supported in inference.')
if not isinstance(load_dir, dict) and os.path.isdir(load_dir):
if tag is None:
latest_path = os.path.join(load_dir, "latest")
if os.path.isfile(latest_path):
with open(latest_path, "r") as fd:
tag = fd.read().strip()
ckpt_list = self._get_all_ckpt_names(load_dir, tag)
sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, self.checkpoint_engine)
else:
sd_loader = SDLoaderFactory.get_sd_loader_json(load_dir, self.checkpoint_engine)
checkpoint = sd_loader['checkpoints']
if type(checkpoint) is list:
self.sd = torch.load(checkpoint[0], map_location='cpu')
self.key_list = list(self.sd.keys())
self.load_model_with_checkpoint(self.module)
for i in range(1, len(checkpoint)):
if not dist.is_initialized() or dist.get_rank() == 0:
print(f"loading checkpoint ({i})")
self.sd = torch.load(checkpoint[i], map_location=get_accelerator().device_name())
self.key_list = list(self.sd.keys())
self.load_model_with_checkpoint(self.module)
else:
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
load_path, checkpoint, quantize_config = sd_loader.load(self._config.tensor_parallel.tp_size,
mp_rank,
is_pipe_parallel=is_pipe_parallel,
quantize=(self._config.dtype is torch.int8),
quantize_groups=self.quantize_groups,
mlp_extra_grouping=self.mlp_extra_grouping)
self.quantization_scales, self.quantize_merge_count = quantize_config
moe, _ = has_moe_layers(self.module)
if moe:
from deepspeed.runtime.engine import DeepSpeedEngine
old_moe_load = False
if not isinstance(checkpoint['num_experts'], list):
old_moe_load = True
DeepSpeedEngine.load_moe_state_dict(load_dir,
tag,
state_dict=checkpoint[self._choose_module_key(checkpoint)],
old_moe_load=old_moe_load,
model=self.module,
mpu=self.mpu,
checkpoint_engine=self.checkpoint_engine)
self.module.load_state_dict(state_dict=checkpoint[self._choose_module_key(checkpoint)],
strict=load_module_strict)
def _choose_module_key(self, sd):
assert not ('module' in sd
and 'model' in sd), "checkpoint has both 'model' and 'module' keys, not sure how to proceed"
assert 'module' in sd or 'model' in sd, "checkpoint contains neither 'model' nor 'module' keys, not sure how to proceed"
if 'module' in sd:
return 'module'
elif 'model' in sd:
return 'model'
def _convert_to_dtype(self, config):
if not isinstance(self.module, torch.nn.Module):
return
if False: #config.dtype is torch.int8 and self.quantization_scales is None:
quantizer = WeightQuantization(mlp_extra_grouping=self.mlp_extra_grouping)
model, self.quantization_scales = quantizer.model_quantize(self.module, self.injection_dict,
self.quantize_bits, self.quantize_groups)
elif config.dtype == torch.half:
self.module.half()
elif config.dtype == torch.bfloat16:
self.module.bfloat16()
elif config.dtype == torch.float:
self.module.float()
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = get_accelerator().Stream()
cuda_stream.wait_stream(get_accelerator().current_stream())
with get_accelerator().stream(cuda_stream):
for i in range(3):
ret = self.module(*inputs, **kwargs)
get_accelerator().current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._cuda_graphs = torch.cuda.CUDAGraph()
self.static_inputs = inputs
self.static_kwargs = kwargs
with torch.cuda.graph(self._cuda_graphs):
self.static_output = self.module(*self.static_inputs, **self.static_kwargs)
self.cuda_graph_created = True
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[k].copy_(kwargs[k])
self._cuda_graphs.replay()
return self.static_output
def model_times(self):
assert self.model_profile_enabled, "model profiling is not enabled"
model_times = self._model_times
if self._config.enable_cuda_graph and len(self._model_times) == 0:
raise ValueError("Model times are empty and cuda graph is enabled. If "
"this is a GPT-style model this combo is not supported. If this is a "
"BERT-style model this is a bug, please report it. "
f"Model type is: {type(self.module)}")
self._model_times = []
return model_times
def _module_match(self, module):
for policy in generic_policies:
policy = policy()
if policy.match_replaced(module):
return True
return False
def _local_cuda_graph_used(self, module):
if isinstance(module, torch.nn.Module):
return False
else:
sub_module_cuda_graph = False
for name in module.__dict__.keys():
sub_module = getattr(module, name)
if self._module_match(sub_module) and hasattr(sub_module, "enable_cuda_graph"):
sub_module_cuda_graph = True
return sub_module_cuda_graph
def forward(self, *inputs, **kwargs):
"""Execute forward propagation
Arguments:
*inputs: Variable length input list
**kwargs: variable length keyword arguments
"""
start = None
if self.model_profile_enabled and get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph:
get_accelerator().synchronize()
start = time.time()
if get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph and not self.local_cuda_graph:
if self.cuda_graph_created:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
else:
outputs = self.module(*inputs, **kwargs)
if self.model_profile_enabled and self._config.enable_cuda_graph:
get_accelerator().synchronize()
duration = time.time() - start
self._model_times.append(duration)
return outputs
def _generate(self, *inputs, **kwargs):
# Reset KV-cache at the beginning of generate
if hasattr(self.module, 'reset_cache'):
self.module.reset_cache()
num_beams = 1
if "generation_config" in kwargs:
gen_config = kwargs["generation_config"]
num_beams = getattr(gen_config, "num_beams", 1)
if "num_beams" in kwargs:
num_beams = kwargs["num_beams"]
if num_beams > 1:
raise NotImplementedError("DeepSpeed does not support `num_beams` > 1, if this is important to you please "
"add your request to: https://github.com/microsoft/DeepSpeed/issues/2506")
return self.module.generate(*inputs, **kwargs)
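# Example (illustrative sketch): the engine is normally constructed through
# deepspeed.init_inference rather than directly; keyword arguments are validated into
# a DeepSpeedInferenceConfig. The model and inputs are placeholders.
#
#   import deepspeed
#   engine = deepspeed.init_inference(model, dtype=torch.half, replace_with_kernel_inject=True)
#   outputs = engine(input_ids)   # InferenceEngine.forward wraps the underlying module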
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from . import adam
from . import adagrad
from . import lamb
#from ..git_version_info_installed import installed_ops as __installed_ops__
#if __installed_ops__['sparse_attn']:
from . import sparse_attention
from . import transformer
from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from ..git_version_info import compatible_ops as __compatible_ops__
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.op_builder import RandomLTDBuilder
"""
Returns:
sampled_indices: [layers, batch_size, reserved_length]
new_mask: [batch_size, 1, reserved_length, reserved_length]
"""
random_ltd_module = None
def gpt_sample_tokens(reserved_length: int,
seq_length: int,
batch_size: int,
layers: int = 1,
device: str = 'cpu',
attn_mask: torch.Tensor = None):
prob_dist = torch.ones((layers * batch_size, seq_length), device=device)
sampled_indices = torch.multinomial(prob_dist, reserved_length)
sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length)
# Not certain the optimized kernel is actually better here, because it can break
# alignment if the sequence length is not divisible by 16
# new_mask = random_ltd_module.mask_gather_gpt(attn_mask, reserved_length)
if attn_mask is not None:
new_mask = attn_mask[:, :, :reserved_length, :reserved_length]
else:
new_mask = None
return sampled_indices, new_mask
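# Example (illustrative sketch): sampling reserved token indices for a GPT-style model;
# requires the RandomLTD op to be buildable for the current accelerator. The shapes
# follow the docstring above.
#
#   mask = torch.ones(4, 1, 128, 128)   # (batch, 1, seq, seq)
#   idx, new_mask = gpt_sample_tokens(reserved_length=64, seq_length=128,
#                                     batch_size=4, layers=12, attn_mask=mask)
#   # idx: (12, 4, 64); new_mask: (4, 1, 64, 64)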
"""
Returns:
sampled_indices: [layers, batch_size, reserved_length]
new_mask: [layers, batch_size, 1, reserved_length, reserved_length]
"""
def bert_sample_tokens(reserved_length: int,
seq_length: int,
batch_size: int,
layers: int = 1,
device: str = 'cpu',
attn_mask: torch.Tensor = None):
assert attn_mask is not None
prob_dist = torch.ones((layers * batch_size, seq_length), device=device)
sampled_indices = torch.multinomial(prob_dist, reserved_length)
sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length)
dtype = sampled_indices.dtype
sampled_indices = sampled_indices.to(torch.long)
new_mask = []
for l in range(layers):
tmp_mask_list = []
for i in range(batch_size):
mask_tmp = attn_mask[i:i + 1, :, sampled_indices[l][i], :]
tmp_mask_list.append(mask_tmp[:, :, :, sampled_indices[l][i]])
new_mask.append(torch.cat(tmp_mask_list, dim=0))
return sampled_indices.to(dtype), new_mask
class GatherTokens(torch.autograd.Function):
@staticmethod
def forward(ctx, activations: torch.Tensor, sorted_indices: torch.Tensor, batch_first: bool):
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
ctx.save_for_backward(activations, sorted_indices)
ctx.batch_first = batch_first
return activations, random_ltd_module.token_gather(activations, sorted_indices, batch_first)
@staticmethod
def backward(ctx, a_gradients: torch.Tensor, g_gradients: torch.Tensor):
g_gradients = g_gradients.contiguous()
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
activations, sorted_indices = ctx.saved_tensors
batch_first = ctx.batch_first
return random_ltd_module.token_scatter_(a_gradients, g_gradients, sorted_indices, batch_first), None, None
class ScatterTokens(torch.autograd.Function):
@staticmethod
def forward(ctx, all_activations: torch.Tensor, layer_activations: torch.Tensor, sorted_indices: torch.Tensor,
batch_first: bool):
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
scatter_results = random_ltd_module.token_scatter_(all_activations.clone(), layer_activations, sorted_indices,
batch_first)
ctx.save_for_backward(sorted_indices)
ctx.batch_first = batch_first
return scatter_results
@staticmethod
def backward(ctx, out_gradients: torch.Tensor):
out_gradients = out_gradients.contiguous()
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
sorted_indices, = ctx.saved_tensors
batch_first = ctx.batch_first
ret_val = random_ltd_module.token_gather(out_gradients, sorted_indices, batch_first)
return out_gradients, ret_val, None, None
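# Example (illustrative sketch): a random-LTD layer gathers the reserved tokens, runs
# the (placeholder) transformer layer on the reduced sequence, and scatters the result
# back into the full-length activations. `hidden_states`, `sorted_indices`, `new_mask`,
# and `transformer_layer` are placeholders.
#
#   full_acts, gathered = GatherTokens.apply(hidden_states, sorted_indices, True)
#   reduced = transformer_layer(gathered, attention_mask=new_mask)
#   hidden_states = ScatterTokens.apply(full_acts, reduced, sorted_indices, True)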
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .fused_lamb import FusedLamb
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from NVIDIA/apex/optimizer/fused_adam and implements the LAMB optimizer
"""
import types
import torch
from deepspeed.ops.op_builder import FusedLambBuilder
class FusedLamb(torch.optim.Optimizer):
"""Implements the LAMB algorithm. Currently GPU-only.
LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`
(https://arxiv.org/abs/1904.00962).
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
bias_correction (bool, optional): bias correction (default: True)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
max_grad_norm (float, optional): value used to clip global grad norm
(default: 0.0)
max_coeff (float, optional): maximum value of the lamb coefficient (default: 10.0)
min_coeff (float, optional): minimum value of the lamb coefficient (default: 0.01)
amsgrad (boolean, optional): NOT SUPPORTED in FusedLamb!
"""
def __init__(self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
max_coeff=10.0,
min_coeff=0.01,
amsgrad=False):
self.fused_lamb_cuda = FusedLambBuilder().load()
if amsgrad:
raise RuntimeError('FusedLamb does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm,
max_coeff=max_coeff,
min_coeff=min_coeff)
super(FusedLamb, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
self.lamb_coeffs = []
def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
output_params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
# assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if output_params is None:
output_params_group = [None] * len(self.param_groups)
elif isinstance(output_params, types.GeneratorType):
output_params_group = [output_params]
elif type(output_params[0]) != list:
output_params_group = [output_params]
else:
output_params_group = output_params
if grad_norms is None:
grad_norms = [None] * len(self.param_groups)
#remove the previous coeffs
del self.lamb_coeffs[:]
for group, grads_this_group, output_params_this_group, grad_norm_group in zip(
self.param_groups, grads_group, output_params_group, grad_norms):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
if output_params_this_group is None:
output_params_this_group = [None] * len(group['params'])
if grad_norm_group is None:
grad_norm_group = [None] * len(group['params'])
elif not isinstance(grad_norm_group, list):
grad_norm_group = [grad_norm_group]
bias_correction = 1 if group['bias_correction'] else 0
for p, grad, output_param, grad_norm in zip(group['params'], grads_this_group, output_params_this_group,
grad_norm_group):
# compute combined scale factor for this group
combined_scale = scale
if group['max_grad_norm'] > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
if clip > 1:
combined_scale = clip * scale
#note: p.grad should not ever be set for correct operation of mixed precision optimizer that sometimes sends None gradients
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('FusedLamb does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
max_coeff = group['max_coeff']
min_coeff = group['min_coeff']
state['step'] += 1
out_p = torch.tensor([], dtype=torch.float) if output_param is None else output_param
lamb_coeff = self.fused_lamb_cuda.lamb(p.data, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1,
beta2, max_coeff, min_coeff, group['eps'], combined_scale,
state['step'], self.eps_mode, bias_correction,
group['weight_decay'])
self.lamb_coeffs.append(lamb_coeff)
return loss
def get_lamb_coeffs(self):
lamb_coeffs = [lamb_coeff.item() for lamb_coeff in self.lamb_coeffs]
return lamb_coeffs
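# A hedged usage sketch (not part of the original file): after a training step, the
# per-parameter LAMB trust-ratio coefficients collected in `self.lamb_coeffs` above can be
# read back for logging. `optimizer` is assumed to be an instance of this fused LAMB
# optimizer and `loss` a scalar produced by the surrounding training loop.
#
#   loss.backward()
#   optimizer.step()                      # fills self.lamb_coeffs for this step
#   coeffs = optimizer.get_lamb_coeffs()  # plain Python floats, one per updated parameter
#   for idx, coeff in enumerate(coeffs):
#       print(f"param {idx}: lamb coefficient {coeff:.4f}")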
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
# https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
import importlib
import torch
import triton
import triton.language as tl
import triton._C.libtriton as libtriton
from deepspeed.accelerator import get_accelerator
@triton.jit
def _kernel(A, B, C, stride_za, stride_ha, stride_ma, stride_ka, stride_zb, stride_hb, stride_kb, stride_nb, stride_zc,
stride_hc, stride_mc, stride_nc, DS0, DS1, SDD_K, SDD_off_width, lut, locks, nlocks, **meta):
TM = meta['TM']
TN = meta['TN']
TK = meta['TK']
TZ = meta['TZ']
BLOCK = meta['BLOCK']
#------------#
#- Prologue -#
#------------#
pid0 = tl.program_id(0)
pid1 = tl.program_id(1)
pidz = tl.program_id(2)
if meta['SDD']:
pid1 = pid1 + SDD_off_width
blockidm = tl.arange(0, TM) // BLOCK
blockidn = tl.arange(0, TN) // BLOCK
offlutm = blockidm * (TN // BLOCK) * 4
offlutn = blockidn * 4
header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4
z = tl.load(header + 0)
i = tl.load(header + 1 + offlutm)
j = tl.load(header + 2 + offlutn)
AS1 = SDD_K // TZ
lockid = tl.where(TZ > 1, 1, 0)
offka = pid0 * AS1
offkb = pid0 * AS1
offmc = 0
offnc = 0
offpa = 0
offpb = 0
maxid = TZ
offhc = 0
offha = z
offhb = z
ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)
rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)
else:
header = lut + pid0 * 6
offset = tl.load(header + 0)
AS1 = tl.load(header + 1)
column = tl.load(header + 2)
depth = tl.load(header + 3)
lockid = tl.load(header + 4)
maxid = tl.load(header + 5)
pinc = lut + offset
offhc = depth
if meta['DSD']:
# output offset
offnc = pid1 * TN
offmc = column * TM
offpc = 0
# dense input offset
offnb = pid1 * TN
offkb = tl.load(pinc)
offkb = tl.multiple_of(offkb, 8) # compiler hint
offpb = 0
# sparse input offset
offma = 0
offka = 0
offpa = tl.load(pinc + 1)
offpa = tl.multiple_of(offpa, 8) # compiler hint
offpa = offpa * BLOCK * BLOCK
offha = 0
offhb = depth
else:
# output offset
offmc = pid1 * TM
offnc = column * TN
offpc = 0
# dense input offset
offma = pid1 * TM
offka = tl.load(pinc)
offka = tl.multiple_of(offka, 8) # compiler hint
offpa = 0
# sparse input offset
offnb = 0
offkb = 0
offpb = tl.load(pinc + 1)
offpb = tl.multiple_of(offpb, 8) # compiler hint
offpb = offpb * BLOCK * BLOCK
offha = depth
offhb = 0
ram = offma + tl.arange(0, TM)
rbn = offnb + tl.arange(0, TN)
# initialize a, b pointers
rka = offka + tl.arange(0, TK)
rkb = offkb + tl.arange(0, TK)
pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka
pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb
if meta['DDS']:
checkam = ram[:, None] < DS0
else:
checkam = AS1 > 0
if meta['DSD']:
checkbn = rbn[None, :] < DS0
else:
checkbn = AS1 > 0
a = tl.load(pa, mask=checkam, other=0.)
b = tl.load(pb, mask=checkbn, other=0.)
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TM, TN), dtype=tl.float32)
for k in range(AS1, 0, -TK):
acc += tl.dot(a, b)
if meta['SDD']:
inc_a = TK * stride_ka
inc_b = TK * stride_kb
else:
pinc += 2
if meta['DSD']:
inc_b = tl.load(pinc)
inc_a = tl.load(pinc + 1)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = inc_b * stride_kb
if meta['DDS']:
inc_a = tl.load(pinc)
inc_b = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = inc_a * stride_ka
pa += inc_a
pb += inc_b
# pre-fetch
checkak = k > TK
checkbk = k > TK
checka = checkam & checkak
checkb = checkbn & checkbk
a = tl.load(pa, mask=checka)
b = tl.load(pb, mask=checkb)
c = acc.to(C.dtype.element_ty)
if meta['SDD']:
checkc = True
rr_blockidm = tl.arange(0, TM) // BLOCK
rr_blockidn = tl.arange(0, TN) // BLOCK
rr_offlutm = rr_blockidm * (TN // BLOCK) * 4
rr_offlutn = rr_blockidn * 4
off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]
bkid = tl.load(header + off_bkid)
offpc = bkid * BLOCK * BLOCK
rcm = tl.arange(0, TM) % BLOCK
rcn = tl.arange(0, TN) % BLOCK
else:
rcm = offmc + tl.arange(0, TM)
rcn = offnc + tl.arange(0, TN)
if meta['DSD']:
checkc = rcn[None, :] < DS0
if meta['DDS']:
checkc = rcm[:, None] < DS0
pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc
# write-back directly
if lockid == 0:
tl.store(pc, c, mask=checkc)
# accumulate partial results using spin-locks
else:
plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(1) * nlocks + lockid - 1
pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks
while tl.atomic_cas(plock, 0, 1) == 1:
pass
count = tl.load(pcount)
if count == 0:
tl.store(pc, c, mask=checkc)
else:
d = tl.load(pc, mask=checkc)
tl.store(pc, d + c, mask=checkc)
tl.atomic_xchg(pcount, (count + 1) % maxid)
tl.atomic_xchg(plock, 0)
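# The spin-lock epilogue above lets several kernel instances that share one output tile
# (a TZ-way split of the reduction, or segmented DSD/DDS reductions) combine their partial
# sums: the first writer stores, later writers load-add-store, and `pcount` wraps at
# `maxid` so the lock buffers can be reused across launches. A minimal single-threaded
# sketch of the same bookkeeping (illustrative only, not part of the Triton kernel):
#
#   def accumulate_partial(c_tile, partial, state, maxid):
#       # c_tile, partial: tensors of equal shape; state: one-element list holding the
#       # partial-result count (kept in the `locks` buffer in the kernel, guarded by a lock)
#       if state[0] == 0:
#           c_tile.copy_(partial)      # first partial result: overwrite
#       else:
#           c_tile.add_(partial)       # later partial results: accumulate
#       state[0] = (state[0] + 1) % maxid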
##############
# MAIN API #
##############
class _sparse_matmul(torch.autograd.Function):
sdd_cache = dict()
dsd_cache = dict()
dds_cache = dict()
locks = dict()
    # Given an array `sizes` holding the reduction size for each column of a
    # block-mode matrix multiplication, performs load-balancing by splitting
    # large reductions into several smaller segments (bounded by the
    # seg_max/seg_min heuristics below).
@staticmethod
def load_balance(sizes, block):
#global triton
#if triton is None:
# triton = importlib.import_module('triton')
# segment size
# heuristics taken from OpenAI blocksparse code
# https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95
max_size = sizes.max()
min_size = sizes[sizes != 0].min()
#if max_size > min_size * 2.0:
# seg_max = max(triton.cdiv(max_size, 4), min_size*2)
#else:
# seg_max = max_size
seg_max = max_size
seg_min = max(triton.cdiv(seg_max, 4), 4)
# split reduction into segments
div = sizes // seg_max
rem = sizes % seg_max
packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()
width = packs.sum()
segments = torch.empty(width, dtype=sizes.dtype)
column = torch.empty_like(segments)
lockid = torch.zeros_like(segments)
maxid = torch.zeros_like(segments)
nlocks = 0
current = 0
col_idx = 0
for i in range(len(sizes)):
d, r = div[i], rem[i]
isempty = sizes[i] < seg_min
last = current + d + (r >= seg_min) + isempty
# column id
column[current:last] = col_idx
# lock id
if d > 1 or (d == 1 and r >= seg_min):
nlocks += 1
lockid[current:last] = nlocks
maxid[current:last] = last - current
# segment size
segments[current:current + d] = seg_max
if r < seg_min and not isempty:
segments[current + d - 1] += r
if r >= seg_min or isempty:
segments[current + d] = r
current = last
col_idx += 1
offsets = torch.zeros_like(segments)
offsets[1:] = torch.cumsum(segments[:-1], dim=0)
return segments, column, lockid, maxid, offsets
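    # Hedged illustration (not in the original source): `load_balance` splits each column's
    # reduction into segments bounded by the seg_max/seg_min heuristics above and returns,
    # per segment, its size, the output column it belongs to, a lock id (non-zero when a
    # column is split and its partial results must be merged), the number of partials
    # (maxid) and the block offset where the segment starts, e.g.
    #
    #   sizes = torch.tensor([8, 2, 0, 5])
    #   segments, column, lockid, maxid, offsets = _sparse_matmul.load_balance(sizes, block=16)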
@staticmethod
def get_locks(size, dev):
if dev not in _sparse_matmul.locks or \
size > _sparse_matmul.locks[dev].size(0):
_sparse_matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)
return _sparse_matmul.locks[dev]
##########################
# SPARSE = DENSE x DENSE #
##########################
@staticmethod
def make_sdd_lut(layout, block, dtype, device):
#_sparse_matmul._load_utils()
#start_width = 64 // block
#segmented = _sparse_matmul.sdd_segment(layout.type(torch.int32), start_width)
start_width = (128 if block > 16 else 32) // block
layout = layout.type(torch.int32)
segmented = libtriton.superblock(layout.data_ptr(), layout.shape[0], layout.shape[1], layout.shape[2],
start_width)
luts, widths, packs = [], [], []
for size, nnz in segmented:
""" width = nnz.shape[0] // (size * size)
h = nnz[:, 0]
i = nnz[:, 1]
j = nnz[:, 2]
b = nnz[:, 3]
lut = torch.stack((h, i, j, b), dim=1).view(-1).contiguous()
luts.append(lut.type(torch.int32).to(device))
widths.append(width)
packs.append(size) """
nnz = nnz.reshape(-1, 4)
width = nnz.shape[0] // (size * size)
luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))
widths.append(width)
packs.append(size)
# create locks
return luts, None, widths, packs
@staticmethod
def _sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, luts, num_locks, widths, packs, bench, time):
if trans_c:
a, b = b, a
trans_a, trans_b = not trans_b, not trans_a
AS0 = a.size(0)
# Shape check
a_dim = -2 if trans_a else -1
b_dim = -1 if trans_b else -2
a_inner, b_inner = a.shape[a_dim], b.shape[b_dim]
if a_inner != b_inner:
raise ValueError(f"Size of tensor A along the {a_dim} dim ({a_inner}) must match size "
f"of tensor B along the {b_dim} dim ({b_inner})")
if a_inner % 16 != 0:
raise ValueError('Reduction size for SDD must be a multiple of 16')
batch_size = a.size(0)
a_outer = a.size(3 if trans_a else 2)
dtype = a.dtype
is_16_multiple = a_inner % 16 == 0
is_32_multiple = a_inner % 32 == 0
is_64_multiple = a_inner % 64 == 0
if not is_16_multiple:
raise ValueError('Reduction size for SDD must be a multiple of 16')
device = a.device
# create kernel
total_width = sum([width * pack * pack for width, pack in zip(widths, packs)])
c = torch.empty((batch_size, total_width, block, block), dtype=dtype, device=a.device)
for lut, width, pack in zip(luts, widths, packs):
F32TK = [8, 16]
F16TK = [16]
F16TK += [32] if is_32_multiple else []
F16TK += [64] if is_64_multiple else []
TK = {torch.float32: F32TK, torch.float16: F16TK}[dtype]
num_lock = 1
meta = {
'TM': block * pack,
'TN': block * pack,
'BLOCK': block,
'TK': TK[0],
'TZ': 1,
'SDD': True,
'DSD': False,
'DDS': False
}
# create output
locks = _sparse_matmul.get_locks(2 * width * AS0 * num_lock, a.device)
# maximum grid size is 65535
# so operation might be decomposed into multiple
# kernel calls
max_width = 49152
total = 0 if bench else None
for off_width in range(0, width, max_width):
grid = lambda meta: [meta['TZ'], min(max_width, width - off_width), batch_size]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(0),
c.stride(2),
c.stride(3),
a_outer,
a_outer,
a_inner,
off_width,
lut,
locks,
num_lock,
num_warps=4,
**meta)
# save for backward pass
return c
##########################
# DENSE = DENSE x SPARSE #
##########################
    # Given a binary layout of 0s and 1s, constructs the look-up table
    # used for efficient execution on GPUs
@staticmethod
def make_dxx_lut(layout, block, step, trans, device, transform=lambda idx: idx):
# load-balancing
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
segments = _empty.clone()
column = _empty.clone()
depth = _empty.clone()
lockid = _empty.clone()
maxid = _empty.clone()
offsets = _empty.clone()
current_offset = 0
current_maxid = 0
for z in range(layout.size(0)):
if trans:
sizes = torch.sum(layout[z, :, :], 1)
else:
sizes = torch.sum(layout[z, :, :], 0)
z_segments, z_column, z_lockid, z_maxid, z_offsets = _sparse_matmul.load_balance(sizes, block)
z_depth = z * torch.ones_like(z_segments)
z_lockid[z_lockid > 0] += current_maxid
current_maxid = z_lockid.max()
# concatenate depth
segments = torch.cat((segments, z_segments))
column = torch.cat((column, z_column))
depth = torch.cat((depth, z_depth))
maxid = torch.cat((maxid, z_maxid))
offsets = torch.cat((offsets, current_offset + z_offsets))
lockid = torch.cat((lockid, z_lockid))
current_offset += layout[z, :, :].sum()
segments *= step
# pointer increments
if trans:
nnz = layout.nonzero()
else:
nnz = layout.transpose(1, 2).nonzero()
num_blocks = nnz.size(0)
offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
idx = transform(nnz[:, 2] * block)
xincs = idx.clone()
xincs[1:] -= idx[:-1]
# divide block into multiple steps
div = block // step
xincs = xincs.view(-1, 1).repeat(1, div)
xincs[:, 1:] = step
xincs[:, 0] -= (div - 1) * step
# first increment for each reduction is actually the offset
xincs[offsets[segments > 0], 0] = idx[offsets[segments > 0]]
xincs = xincs.view(-1)
# block-mode input increments
if trans:
widx = torch.arange(num_blocks)
else:
widx = _empty.clone()
current_offset = 0
for z in range(layout.size(0)):
layoutw = layout[z, :, :].clone()
msum = layoutw.sum()
layoutw[layoutw > 0] = 1 + torch.arange(msum)
widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))
current_offset += msum
widx = widx
wincs = widx * block * block
wincs[1:] -= widx[:-1] * block * block
wincs = wincs.view(-1, 1).repeat(1, div)
if trans:
wincs[:, 1:] = step
wincs[:, 0] -= (div - 1) * step
else:
wincs[:, 1:] = step * block
wincs[:, 0] -= (div - 1) * step * block
wincs[offsets[segments > 0], 0] = widx[offsets[segments > 0]]
wincs = wincs.view(-1)
# adjust offset and segment size
offsets *= 2 * div
segments *= div
# create header
width = column.size(0)
offsets += 6 * width
header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()
incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()
incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
# create lut
lut = torch.cat((header, incs))
lut = lut.type(torch.int32).to(device)
# create locks
num_locks = max(1, lockid.max())
return lut, num_locks, width, None
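    # Note on the LUT produced above (added comment, derived from how `_kernel` consumes
    # it): each of the `width` output tiles gets a 6-int header
    # [offset, segment size, column, depth/head, lock id, maxid], and `offsets` points past
    # the headers into the interleaved (dense, sparse) pointer increments that the inner
    # loop reads two at a time via `pinc`.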
@staticmethod
def _dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
global triton
if triton is None:
triton = importlib.import_module('triton')
# shapes / dtypes
AS0 = a.size(0)
AS1 = a.size(1)
AS2 = a.size(3 if trans_a else 2)
AS3 = a.size(2 if trans_a else 3)
BS0 = spdims[0]
BS1 = block * spdims[2 if trans_b else 1]
BS2 = block * spdims[1 if trans_b else 2]
dtype = a.dtype
# kernel
meta = {'TN': block, 'TM': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': False, 'DDS': True}
# output
CS0 = AS0
CS1 = AS1
CS2 = BS2 if trans_c else AS2
CS3 = AS2 if trans_c else BS2
locks = _sparse_matmul.get_locks(2 * AS0 * AS2 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(AS2, meta['TM']), AS0]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(3 if trans_c else 2),
c.stride(2 if trans_c else 3),
AS2,
BS2,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta)
return c
@staticmethod
def _dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
global triton
if triton is None:
triton = importlib.import_module('triton')
# shapes / dtypes
AS0 = spdims[0]
AS1 = block * spdims[2 if trans_a else 1]
AS2 = block * spdims[1 if trans_a else 2]
BS0 = b.size(0)
BS1 = b.size(1)
BS2 = b.size(3 if trans_b else 2)
BS3 = b.size(2 if trans_b else 3)
dtype = a.dtype
# kernel
meta = {'TM': block, 'TN': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': True, 'DDS': False}
# output
CS0 = BS0
CS1 = BS1
CS2 = BS3 if trans_c else AS1
CS3 = AS1 if trans_c else BS3
locks = _sparse_matmul.get_locks(2 * BS0 * BS3 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(BS3, meta['TN']), BS0]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(2),
c.stride(3),
BS3,
AS1,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta)
return c
fn = {'sdd': _sdd_matmul.__get__(object), 'dsd': _dsd_matmul.__get__(object), 'dds': _dds_matmul.__get__(object)}
@staticmethod
def forward(ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_num_locks, c_width, c_packs,
c_bench, c_time, da_lut, da_num_locks, da_width, da_packs, da_bench, da_time, db_lut, db_num_locks,
db_width, db_packs, db_bench, db_time):
c = _sparse_matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_num_locks, c_width,
c_packs, c_bench, c_time)
# save for backward
ctx.save_for_backward(a, b)
ctx.da_num_locks = da_num_locks
ctx.da_lut = da_lut
ctx.da_width = da_width
ctx.da_packs = da_packs
ctx.da_bench = da_bench
ctx.da_time = da_time
ctx.db_lut = db_lut
ctx.db_num_locks = db_num_locks
ctx.db_width = db_width
ctx.db_bench = db_bench
ctx.db_packs = db_packs
ctx.db_time = db_time
ctx.mode = mode
ctx.spdims = spdims
ctx.block = block
ctx.trans_a = trans_a
ctx.trans_b = trans_b
return c
@staticmethod
def backward(ctx, dc):
# saved for backward
a, b = ctx.saved_tensors
mode = ctx.mode
# gradients w.r.t. a
if ctx.needs_input_grad[0]:
mode_da = mode[1] + mode[0] + mode[2]
da = _sparse_matmul.fn[mode_da](dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block,
ctx.da_lut, ctx.da_num_locks, ctx.da_width, ctx.da_packs, ctx.da_bench,
ctx.da_time)
# gradients w.r.t. b
if ctx.needs_input_grad[1]:
mode_db = mode[2] + mode[1] + mode[0]
db = _sparse_matmul.fn[mode_db](a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block,
ctx.db_lut, ctx.db_num_locks, ctx.db_width, ctx.db_packs, ctx.db_bench,
ctx.db_time)
return da, db, None, None, None,\
None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None
class MatMul:
"""Block-Sparse MatMul class; this class handles three types of matrix-multiplication:
- sparse = dense X dense
- dense = sparse X dense
- dense = dense X sparse
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
"""
def make_lut(self, dtype, device):
"""Generates the sparsity layout/s used in block-sparse matmul
"""
key = (dtype, device)
if key in self.lut_cache:
return self.lut_cache[key]
# C look-up table
layout, block = self.layout, self.block
step = 16
if self.mode == 'sdd':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
elif self.mode == 'dsd':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, not self.trans_a,
device)
elif self.mode == 'dds':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_b,
device)
# DA look-up table
if self.mode == 'sdd':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step, True, device)
elif self.mode == 'dsd':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
elif self.mode == 'dds':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step,
not self.trans_b, device)
# DB look-up table
if self.mode == 'sdd':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, False, device)
elif self.mode == 'dsd':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_a,
device)
elif self.mode == 'dds':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs)
return self.lut_cache[key]
def __init__(self, layout, block, mode, trans_a=False, trans_b=False, bench=False):
"""Initialize the Block-Sparse MatMul class.
Arguments:
layout: required: sparsity layout tensor
block: required: an integer determining the block size.
mode: required: a string determining type of matmul; ('sdd') sparse = dense X dense, ('dsd') dense = sparse X dense, ('dds') dense = dense X sparse
trans_a: optional: a boolean determining if multiplication needs to be applied on transpose of input a; default is false
trans_b: optional: a boolean determining if multiplication needs to be applied on transpose of input b; default is false
bench: optional: set if you want to do benchmarking
"""
if mode not in ['sdd', 'dsd', 'dds']:
raise NotImplementedError('Supported modes are: sdd, dsd, dds')
# look-up table cache
self.lut_cache = dict()
# attributes
self.trans_a = trans_a
self.trans_b = trans_b
self.mode = mode
self.block = block
self.layout = layout
layout_dim = layout.ndim
assert layout_dim in (2, 3), "Layout should be a 2 or 3 dimensional tensor of 0s and 1s"
if not mode == 'sdd':
# Dims to be reduced on the 'inside' of the matmul, either -1 or -2
trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b,
-2)
self.dense_inner_dim = -((sparse_inner % 2) + 1) if not trans_dense else sparse_inner
sparse_inner = sparse_inner if not trans_sparse else -((sparse_inner % 2) + 1)
# Inner dim of the dense input should be equal to the inner dim of the sparse input
self.dense_inner_size = layout.shape[sparse_inner] * block
# Expected shape for sparse inputs
self.sparse_shape = (layout.sum().item(), block, block)
# Support using the same layout across attention heads etc.
if layout_dim == 2:
layout = layout.unsqueeze(0)
layout = layout.long() # Above code assumes the layout tensor is an integral type
self.spdims = layout.shape
# timings
self.bench = bench
self.time_c = None
self.time_da = None
self.time_db = None
# pad shapes of a tensor to make it
# compatible with kernel calls
@staticmethod
def _pad_shape(x, is_sparse):
max_dim = 3 if is_sparse else 4
for i in range(max_dim - x.dim()):
x = x.unsqueeze(0)
return x
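    # For illustration (added comment): _pad_shape left-pads with singleton dimensions so
    # that dense operands reach the 4-D [batch, head, rows, cols] form and sparse operands
    # the 3-D [blocks, block, block] form expected by the kernel, e.g.
    #   MatMul._pad_shape(torch.empty(64, 64), is_sparse=False).shape   # (1, 1, 64, 64)
    #   MatMul._pad_shape(torch.empty(9, 16, 16), is_sparse=True).shape # (9, 16, 16)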
def __call__(self, a, b):
"""Applies Block-Sparse MatMul.
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
Arguments:
a: required: a dense/block-sparse tensor; first input of mat-mul
b: required: a dense/block-sparse tensor; second input of mat-mul
Return:
c: a dense/block-sparse tensor result of a X b
"""
c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
# timings
time_c = [None]
time_da = [None]
time_db = [None]
original_dims = max(a.ndim, b.ndim)
a, b = self._validate_inputs(a, b)
# pad shapes with ones
a = MatMul._pad_shape(a, self.mode == 'dsd')
b = MatMul._pad_shape(b, self.mode == 'dds')
# execute
c = _sparse_matmul.apply(a, b, self.trans_a, self.trans_b, False, self.mode, self.spdims, self.block, c_lut,
c_num_locks, c_width, c_packs, self.bench, time_c, da_lut, da_num_locks, da_width,
da_packs, self.bench, time_da, db_lut, db_num_locks, db_width, db_packs, self.bench,
time_db)
# This removes any leading singleton dimensions we may have added to the tensor that weren't in the input
dims_to_trim = c.ndim - original_dims
for _ in range(dims_to_trim):
c = c.squeeze(0)
self.time_c = time_c[0]
self.time_da = time_da[0]
self.time_db = time_db[0]
return c
def _validate_inputs(self, a, b):
if a.device != b.device:
raise ValueError(f"Inputs must be on the same device; got {a.device} for tensor A "
f"and {b.device} for tensor B")
if not get_accelerator().on_accelerator(a):
raise ValueError("Only GPU devices are supported for now")
# When autocast is enabled, torch.matmul autocasts to float16, so we do the same here
if torch.is_autocast_enabled():
a, b = a.half(), b.half()
elif a.dtype != b.dtype:
raise ValueError(f"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B")
mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b
if mode != 'sdd':
# One input is sparse
dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')
dense_inner = dense.shape[self.dense_inner_dim]
if dense_inner != self.dense_inner_size:
raise ValueError(f"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim "
f"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.")
if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:
raise ValueError(f"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument "
f"{sparse_name}, got {sparse.shape}")
def add_extra_dims(x):
# Add extra leading singleton dimensions if needed
dims_needed = 4 - x.ndim
if dims_needed > 0:
singletons = [1] * dims_needed
x = x.view(*singletons, *x.shape)
elif dims_needed < 0:
raise ValueError("Tensors with more than 4 dimensions are not currently supported")
return x
# Pad shapes with leading singleton dimensions
a = add_extra_dims(a)
b = add_extra_dims(b)
return a, b
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch import nn
from deepspeed.ops.sparse_attention import SparseSelfAttention, FixedSparsityConfig
class BertSparseSelfAttention(nn.Module):
"""Implements Sparse Self Attention layer of Bert model based on https://github.com/microsoft/DeepSpeedExamples/blob/master/bing_bert/nvidia/modelingpreln.py#L373
For more information please see, TODO DeepSpeed Sparse Transformer.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
"""
def __init__(
self,
config,
            # SparsityConfig parameters need to be set accordingly
sparsity_config=FixedSparsityConfig(num_heads=4)):
"""Initialize the bert sparse self attention layer.
Note) you can use any of the provided sparsity configs or simply add yours!
Arguments:
config: required: Bert model config
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on FixedSparsityConfig class.
"""
super(BertSparseSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError("The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.sparse_self_attention = SparseSelfAttention(sparsity_config)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
"""Applies forward phase of bert sparse self attention
Arguments:
hidden_states: required: hidden_states tensor of the bert model
            attention_mask: required: a mask tensor that is passed to the underlying sparse self-attention layer as its key padding mask; currently only 2D masks are supported
Return:
context_layer: a dense tensor containing attention context
"""
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
context_layer = self.sparse_self_attention(query_layer,
key_layer,
value_layer,
key_padding_mask=attention_mask)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size, )
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
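# Hedged usage sketch (not part of the original file); `config` stands for any BERT-style
# config exposing hidden_size and num_attention_heads, and inputs must be fp16 on a CUDA
# device with sequence length a multiple of the sparsity block size.
#
#   attn = BertSparseSelfAttention(config, sparsity_config=FixedSparsityConfig(num_heads=16))
#   attn = attn.half().to('cuda')
#   context = attn(hidden_states, attention_mask)   # hidden_states: [bsz, seq, hidden]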
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .sparsity_config import SparsityConfig, DenseSparsityConfig, FixedSparsityConfig, VariableSparsityConfig, BigBirdSparsityConfig, BSLongformerSparsityConfig, LocalSlidingWindowSparsityConfig
from .sparse_self_attention import SparseSelfAttention
from .bert_sparse_self_attention import BertSparseSelfAttention
from .sparse_attention_utils import SparseAttentionUtils
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch.nn as nn
import torch
from torch import distributed as dist
from deepspeed.ops.sparse_attention import SparsityConfig
class SparseSelfAttention(nn.Module):
"""Implements an efficient Sparse Self Attention of Transformer layer based on `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
For more information please see, TODO DeepSpeed Sparse Transformer.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
"""
def __init__(
self,
            # SparsityConfig parameters need to be set accordingly
sparsity_config=SparsityConfig(num_heads=4),
key_padding_mask_mode='add',
attn_mask_mode='mul',
max_seq_length=2048):
"""Initialize the sparse self attention layer.
Arguments:
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class.
key_padding_mask_mode: optional: a string determining if key padding mask needs to be added, `add`, or be multiplied, `mul`.
attn_mask_mode: optional: a string determining if attention mask needs to be added, `add`, or be multiplied, `mul`.
max_seq_length: optional: the maximum sequence length this sparse attention module will be applied to; it controls the size of the master_layout.
"""
super().__init__()
# sparsity information
self.sparsity_config = sparsity_config
# initialize sparse layout and register as buffer
master_layout = self.sparsity_config.make_layout(max_seq_length)
self.register_buffer("master_layout", master_layout)
self._need_layout_synchronization = True
# mask modes
self.key_padding_mask_mode = key_padding_mask_mode
self.attn_mask_mode = attn_mask_mode
ops = dict()
def get_layout(self, L):
# if layout is never synchronized across GPUs, broadcast the layout from global rank 0
if self._need_layout_synchronization and dist.is_initialized():
dist.broadcast(self.master_layout, src=0)
self._need_layout_synchronization = False
if (L % self.sparsity_config.block != 0):
raise ValueError(
                f'Sequence Length, {L}, needs to be divisible by Block size {self.sparsity_config.block}!')
num_blocks = L // self.sparsity_config.block
return self.master_layout[..., :num_blocks, :num_blocks].cpu() # layout needs to be a CPU tensor
# add to cache
def get_ops(self, H, L):
from deepspeed.ops.sparse_attention.matmul import MatMul
from deepspeed.ops.sparse_attention.softmax import Softmax
if L not in SparseSelfAttention.ops:
sparsity_layout = self.get_layout(L)
sparse_dot_sdd_nt = MatMul(sparsity_layout, self.sparsity_config.block, 'sdd', trans_a=False, trans_b=True)
sparse_dot_dsd_nn = MatMul(sparsity_layout,
self.sparsity_config.block,
'dsd',
trans_a=False,
trans_b=False)
sparse_softmax = Softmax(sparsity_layout, self.sparsity_config.block)
SparseSelfAttention.ops[L] = (sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax)
return SparseSelfAttention.ops[L]
def transpose_key_for_scores(self, x, L):
bsz, num_heads, seq_len, head_dim = x.size()
if seq_len != L:
return x.permute(0, 1, 3, 2)
return x
def transpose_mask_for_sparse(self, qtype, x, is_key_padding_mask=False):
x = x.type(qtype)
if is_key_padding_mask:
xdim = x.dim()
for d in range(xdim - 1, 0, -1):
x = x.squeeze(dim=d)
return x
return x.squeeze()
# forward pass
def forward(self, query, key, value, rpe=None, key_padding_mask=None, attn_mask=None):
"""Applies forward phase of sparse self attention
Arguments:
query: required: query tensor
key: required: key tensor
value: required: value tensor
rpe: optional: a tensor same dimension as x that is used as relative position embedding
key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
            key_padding_mask_mode: optional: a string (set in the constructor) determining if key_padding_mask is added (`add`) or multiplied (`mul`) into the attention scores
            attn_mask_mode: optional: a string (set in the constructor) determining if attn_mask is added (`add`) or multiplied (`mul`) into the attention scores
Return:
attn_output: a dense tensor containing attention context
"""
assert query.dtype == torch.half, "sparse attention only supports training in fp16 currently, please file a github issue if you need fp32 support"
bsz, num_heads, tgt_len, head_dim = query.size()
# transpose back key if it is already transposed
key = self.transpose_key_for_scores(key, tgt_len)
# check that operation is supported
if query.shape != key.shape or key.shape != value.shape:
raise NotImplementedError('only self-attention is supported for now')
# squeeze key_padding_mask if it is given
if key_padding_mask is not None:
key_padding_mask = self.transpose_mask_for_sparse(query.dtype, key_padding_mask, is_key_padding_mask=True)
# squeeze attn_mask if it is given
if attn_mask is not None:
attn_mask = self.transpose_mask_for_sparse(query.dtype, attn_mask)
# cache look-up table computations etc
sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax = self.get_ops(num_heads, tgt_len)
scaling = float(head_dim)**-0.5
# attention scores
attn_output_weights = sparse_dot_sdd_nt(query, key)
attn_output_weights = sparse_softmax(attn_output_weights,
scale=scaling,
rpe=rpe,
key_padding_mask=key_padding_mask,
attn_mask=attn_mask,
key_padding_mask_mode=self.key_padding_mask_mode,
attn_mask_mode=self.attn_mask_mode)
# outputs
attn_output = sparse_dot_dsd_nn(attn_output_weights, value)
return attn_output
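# Hedged usage sketch (not part of the original file): applying the layer directly to
# query/key/value tensors of shape [batch, heads, seq, head_dim]; fp16 and a CUDA device
# are required, and seq must be a multiple of sparsity_config.block and <= max_seq_length.
#
#   from deepspeed.ops.sparse_attention import FixedSparsityConfig
#   attn = SparseSelfAttention(FixedSparsityConfig(num_heads=8), max_seq_length=1024)
#   q = k = v = torch.randn(2, 8, 1024, 64, dtype=torch.half, device='cuda')
#   ctx = attn(q, k, v)   # ctx: [2, 8, 1024, 64]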
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch.nn import functional as F
from deepspeed.ops.sparse_attention import BertSparseSelfAttention, SparsityConfig
'''
This file contains a few utility functions for adapting a pretrained model to use the sparse self-attention module.
'''
class SparseAttentionUtils:
"""This class provides some utility functions that are use integrating sparse attention into transformer models.
Such utilities include extending position embeddings, replacing current self-attention layer with sparse attention, padding sequences to multiple of block size, etc.
"""
@staticmethod
def extend_position_embedding(model, max_position):
"""This function extends the position embedding weights of a model loaded from a checkpoint.
It assumes the new max position is bigger than the original max length.
Arguments:
model: required: a transformer model
max_position: required: an integer determining new position embedding size
Return:
model: updated model; in which position embedding weights have been extended based on new size
"""
if hasattr(model, 'bert'):
original_max_position = model.bert.embeddings.position_embeddings.weight.size(0)
assert max_position > original_max_position
extend_multiples = max(1, max_position // original_max_position)
model.bert.embeddings.position_embeddings.weight.data = model.bert.embeddings.position_embeddings.weight.repeat(
extend_multiples, 1)
elif hasattr(model, 'roberta'):
# RoBERTa has positions 0 & 1 reserved, so embedding size is max position + 2
original_max_position, embed_size = model.roberta.embeddings.position_embeddings.weight.shape
original_max_position -= 2
extend_multiples = max(1, max_position // original_max_position)
assert max_position > original_max_position
max_position += 2
extended_position_embedding = model.roberta.embeddings.position_embeddings.weight.new_empty(
max_position, embed_size)
k = 2
for i in range(extend_multiples):
extended_position_embedding[k:(
k + original_max_position)] = model.roberta.embeddings.position_embeddings.weight[2:]
k += original_max_position
model.roberta.embeddings.position_embeddings.weight.data = extended_position_embedding
else:
raise ValueError(
'Please extend \"extend_position_embedding\" function to support your model type. It currently only supports \"bert\" & \"roberta\"!'
)
model.config.max_position_embeddings = max_position
print(f'Extended position embeddings to {original_max_position * extend_multiples}')
return model
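    # Hedged usage sketch (not from the original file), assuming a Hugging Face style model
    # that exposes `model.bert` (or `model.roberta`) and whose checkpoint was trained with a
    # shorter maximum position (e.g. 512):
    #
    #   model = SparseAttentionUtils.extend_position_embedding(model, max_position=1024)
    #   # the position embedding rows are repeated to cover the new maximum position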
@staticmethod
def update_tokenizer_model_max_length(tokenizer, max_position):
"""This function updates the position embedding length of a tokenizer to a new max position.
Arguments:
tokenizer: required: a transformer tokenizer
max_position: required: an integer determining new position embedding size
Return:
tokenizer: updated tokenizer; in which model maximum length has been extended based on new size
"""
tokenizer.model_max_length = max_position
tokenizer.init_kwargs['model_max_length'] = max_position
        print(f'updated tokenizer model maximum length to {max_position}')
return tokenizer
@staticmethod
def replace_model_self_attention_with_sparse_self_attention(
model,
max_position,
            # SparsityConfig parameters need to be set accordingly
sparsity_config=SparsityConfig(num_heads=4)):
"""This function replaces the self attention layers in model encoder with sparse self attention.
It currently supports bert and roberta model and can be easily extended to any other models following similar steps here.
For sparsityConfig, refer to the config class.
Arguments:
model: required: a transformer model
max_position: required: an integer determining new position embedding size
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
Return:
model: updated model; in which self attention layer has been replaced with DeepSpeed Sparse Self Attention layer.
"""
if hasattr(model, 'bert'):
model.config.max_position_embeddings = max_position
model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config, model.bert.encoder.layer,
sparsity_config)
elif hasattr(model, 'roberta'):
model.config.max_position_embeddings = max_position + 2
model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config,
model.roberta.encoder.layer,
sparsity_config)
else:
raise ValueError(
                'Please extend \"replace_model_self_attention_with_sparse_self_attention\" function to support \
your model type. It currently only supports \"bert\" & \"roberta\"!')
return model
@staticmethod
def replace_self_attention_layer_with_sparse_self_attention_layer(
config,
layers,
            # SparsityConfig parameters need to be set accordingly
sparsity_config=SparsityConfig(num_heads=4)):
"""This function replaces the self attention layers in attention layer with sparse self attention.
For sparsityConfig, refer to the config class.
Arguments:
config: required: transformer model config
layers: required: transformer model attention layers
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
Return:
layers: updated attention layers; in which self attention layers have been replaced with DeepSpeed Sparse Self Attention layer.
"""
for layer in layers:
deepspeed_sparse_self_attn = BertSparseSelfAttention(config, sparsity_config)
deepspeed_sparse_self_attn.query = layer.attention.self.query
deepspeed_sparse_self_attn.key = layer.attention.self.key
deepspeed_sparse_self_attn.value = layer.attention.self.value
layer.attention.self = deepspeed_sparse_self_attn
return layers
@staticmethod
def pad_to_block_size(block_size, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds,
pad_token_id, model_embeddings):
"""This function pads input tokens and attention mask on sequence length dimension to be multiple of block size.
        This is a requirement for the Sparse Transformer, in which the self-attention layer works on sequences whose length is a multiple of the block size.
It needs to be called in your model, such as BertModel, right before you calculate the embedding outputs.
Note)
        1- instead of passing your embedding layer to this function, you can simply add this function to your model; it can be simplified further if the given attention_mask and/or token_type_ids are None.
2- you need to call unpad function before returning your model output to unpad the encoder sequence output.
Arguments:
block_size: required: an integer determining the block size of sparsity config.
pad_token_id: required: an integer determining the pad token from the model config; such as bert.config.pad_token_id.
input_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary
attention_mask: a torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences.
token_type_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
position_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the indices of positions of each input sequence tokens in the position embeddings.
inputs_embeds: an optional torch.FloatTensor of shape [batch_size, sequence_length, hidden_size] that contains embedded representation and can be passed instead of input_ids directly.
model_embeddings: an optional object. If inputs_embeds are not none, this will be your model embeddings such as BertEmbeddings from your model such as BertModel. You can move this function inside your model and use self.embeddings instead of passing this parameter.
Return:
            pad_len: an integer determining how much the inputs have been padded so that the sequence length dimension becomes a multiple of block size.
input_ids: if input_ids are not none padded input_ids otherwise none.
attention_mask: if attention_mask is not none padded attention_mask otherwise none.
token_type_ids: if token_type_ids are not none padded token_type_ids otherwise none.
position_ids: if position_ids are not none padded position_ids otherwise none.
inputs_embeds: if inputs_embeds are not none padded inputs_embeds otherwise none.
"""
batch_size, seq_len = input_ids.shape if input_ids is not None else inputs_embeds.shape[:-1]
pad_len = (block_size - seq_len % block_size) % block_size
if pad_len > 0:
if inputs_embeds is not None:
pad_input_ids = inputs_embeds.new_full((batch_size, pad_len), pad_token_id, dtype=torch.long)
pad_inputs_embeds = model_embeddings(pad_input_ids)
inputs_embeds = torch.cat([inputs_embeds, pad_inputs_embeds], dim=-2)
# may not be needed as input_ids are not used if inputs_embeds are given
if input_ids is not None:
input_ids = F.pad(input_ids, (0, pad_len), value=pad_token_id)
if position_ids is not None:
# pad position_id with pad_token_id
position_ids = F.pad(position_ids, (0, pad_len), value=pad_token_id)
# pad attention mask without attention on the padding tokens
attention_mask = F.pad(attention_mask, (0, pad_len), value=False)
# pad token_type_ids with token_type_id = 0
token_type_ids = F.pad(token_type_ids, (0, pad_len), value=0)
return pad_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
@staticmethod
def unpad_sequence_output(pad_len, sequence_output):
"""This function unpads sequence output if inputs of the model were padded.
        This is a requirement for the Sparse Transformer, in which the self-attention layer works on sequences whose length is a multiple of the block size.
It needs to be called in your model, such as BertModel, right before you return the model outputs.
Arguments:
            pad_len: required: an integer determining how much the model inputs have been padded so that the sequence length dimension became a multiple of block size.
sequence_output: required: sequence output of the encoder layer.
Return:
            sequence_output: unpadded sequence output of the encoder layer.
"""
if (pad_len > 0):
sequence_output = sequence_output[:, :-pad_len]
return sequence_output
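# Hedged sketch (not part of the original file) of how the pad/unpad pair above is meant to
# wrap an encoder forward pass; `self.embeddings` and `self.config` are placeholders for the
# host model that adopts sparse attention:
#
#   pad_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = \
#       SparseAttentionUtils.pad_to_block_size(block_size, input_ids, attention_mask,
#                                              token_type_ids, position_ids, inputs_embeds,
#                                              self.config.pad_token_id, self.embeddings)
#   # ... run the embedding layer and encoder on the padded inputs ...
#   sequence_output = SparseAttentionUtils.unpad_sequence_output(pad_len, sequence_output)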
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import random
class SparsityConfig:
"""Abstract Configuration class to store `sparsity configuration of a self attention layer`.
    It contains the shared properties of different block-sparse sparsity patterns. However, each class needs to extend it based on the required properties and functionality.
"""
def __init__(self, num_heads, block=16, different_layout_per_head=False):
"""Initialize the Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
            block: optional: an integer determining the block size. The current implementation of sparse self-attention is based on blocked sparse matrices, in which this parameter defines the size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
"""
self.num_heads = num_heads
self.block = block
self.different_layout_per_head = different_layout_per_head
self.num_layout_heads = num_heads if different_layout_per_head else 1
def setup_layout(self, seq_len):
"""Create layout tensor for the given sequence length
Arguments:
            seq_len: required: an integer determining the sequence length.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) for sparsity layout of all head; initialized with zero
"""
if (seq_len % self.block != 0):
            raise ValueError(f'Sequence Length, {seq_len}, needs to be divisible by Block size {self.block}!')
num_blocks = seq_len // self.block
# TODO Currently we allocate layout per head; needs to be updated if heads share a single layout.
layout = torch.zeros((self.num_heads, num_blocks, num_blocks), dtype=torch.int64)
return layout
def check_and_propagate_first_head_layout(self, layout):
"""If all heads require same sparsity layout, it propagate first head layout to all heads
Arguments:
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head
"""
if not self.different_layout_per_head:
layout[1:self.num_heads, :, :] = layout[0, :, :]
return layout
class DenseSparsityConfig(SparsityConfig):
"""Configuration class to store `Dense` configuration.
In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension.
"""
def __init__(self, num_heads, block=16, different_layout_per_head=False):
"""Initialize the Dense Sparsity Pattern Config.
In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension.
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
            block: optional: an integer determining the block size. The current implementation of sparse self-attention is based on blocked sparse matrices, in which this parameter defines the size of such blocks, `Block X Block`.
different_layout_per_head: optional: this is just for the sake of consistency with other sparsity formats; can ignore it for DenseSparsityConfig
"""
super().__init__(num_heads, block, different_layout_per_head)
def make_layout(self, seq_len):
"""Set 1 to all blocks of the layout meaning the pattern is dense; not sparse.
Arguments:
            seq_len: required: an integer determining the underlying sequence length; must be <= max sequence length
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; for dense everything is 1
"""
layout = self.setup_layout(seq_len)
layout[:, :, :] = 1
return layout
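    # Illustration (added comment): with the default block of 16, a dense layout for a
    # length-64 sequence is simply an all-ones block mask per head, e.g.
    #   DenseSparsityConfig(num_heads=2).make_layout(64).shape  # torch.Size([2, 4, 4])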
class FixedSparsityConfig(SparsityConfig):
"""Configuration class to store `Fixed` sparsity configuration.
For more details about this sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
This class extends parent class of `SparsityConfig` and customizes it for `Fixed` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_local_blocks=4,
num_global_blocks=1,
attention='bidirectional',
horizontal_global_attention=False,
num_different_global_patterns=1):
"""Initialize `Fixed` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
            block: optional: an integer determining the block size. The current implementation of sparse self-attention is based on blocked sparse matrices, in which this parameter defines the size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_local_blocks: optional: an integer determining the number of blocks in local attention window.
            num_global_blocks: optional: an integer determining how many consecutive blocks in a local window are used as the representative of the window for global attention.
            attention: optional: a string determining the attention type. Attention can be `unidirectional`, as in autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty. Or it can be `bidirectional`, as in BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part.
            horizontal_global_attention: optional: a boolean determining if blocks that are the global representatives of a local window also attend to all other blocks. This is valid only if the attention type is `bidirectional`. Looking at the attention matrix, it means that global attention includes not only the vertical blocks but also the horizontal blocks.
            num_different_global_patterns: optional: an integer determining the number of different global attention layouts. While global attention can be fixed by which block(s) are representative of any local window, since there are multiple heads, each head can use a different global representative. For example, with a 4-block local window and a global attention size of 1 block, we can have 4 different versions in which the first, second, third, or fourth block of each local window can be the global representative of that window. This parameter determines how many such patterns we want. Of course, there is a limitation based on num_local_blocks and num_global_blocks.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_local_blocks = num_local_blocks
if (num_local_blocks % num_global_blocks != 0):
raise ValueError(
                f'Number of blocks in a local window, {num_local_blocks}, must be divisible by the number of global blocks, {num_global_blocks}!'
)
self.num_global_blocks = num_global_blocks
if (attention != 'unidirectional' and attention != 'bidirectional'):
raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
self.attention = attention
if (attention != 'bidirectional' and horizontal_global_attention):
raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!')
self.horizontal_global_attention = horizontal_global_attention
if (num_different_global_patterns > 1 and not different_layout_per_head):
raise ValueError(
f'Number of different layouts cannot be more than one when you have set a single layout for all heads! Set different_layout_per_head to True.'
)
if (num_different_global_patterns > (num_local_blocks // num_global_blocks)):
raise ValueError(
f'Number of layout versions (num_different_global_patterns), {num_different_global_patterns}, cannot be larger than number of local window blocks divided by number of global blocks, {num_local_blocks} / {num_global_blocks} = {num_local_blocks//num_global_blocks}!'
)
self.num_different_global_patterns = num_different_global_patterns
def set_local_layout(self, h, layout):
"""Sets local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set
"""
num_blocks = layout.shape[1]
for i in range(0, num_blocks, self.num_local_blocks):
end = min(i + self.num_local_blocks, num_blocks)
for row in range(i, end):
for col in range(i, (row + 1 if self.attention == 'unidirectional' else end)):
layout[h, row, col] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
        Currently we set global blocks starting from the last block of a local window down to the first one. That means if a local window consists of 4 blocks and the global attention size is one block, we use block #4 in each local window as global. If we have a different layout per head, then other heads will get #3, #2, and #1. And if we have more heads (with different_layout_per_head set) than the number of global patterns, multiple heads may share the same global attention blocks.
Note) if horizontal_global_attention is set, global blocks will be set both horizontally and vertically.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
first_global_block_idx = self.num_local_blocks - (
1 + h % self.num_different_global_patterns) * self.num_global_blocks
        # set all global blocks, except those of a possibly short last local window (handled below)
end = num_blocks - (num_blocks % self.num_local_blocks)
for i in range(first_global_block_idx, end, self.num_local_blocks):
# vertical global attention
first_row = 0 if self.attention == 'bidirectional' else i
#(((i // self.num_local_blocks) + 1) * self.num_local_blocks)
#if (first_row < num_blocks):
layout[h, first_row:, i:i + self.num_global_blocks] = 1
# horizontal global attention; only in bidirectional attention
if (self.horizontal_global_attention):
layout[h, i:i + self.num_global_blocks, :] = 1
# set last global blocks; handle possible short last local window
if (end < num_blocks):
start = min(end + first_global_block_idx, num_blocks - self.num_global_blocks)
end = start + self.num_global_blocks
# vertical global attention
first_row = 0 if self.attention == 'bidirectional' else start
#(((start // self.num_local_blocks) + 1) * self.num_local_blocks)
#if (first_row < num_blocks):
layout[h, first_row:, start:end] = 1
# horizontal global attention
if (self.horizontal_global_attention):
layout[h, start:end, :] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Fixed` sparsity layout used by each head in the sparse attention.
Arguments:
            seq_len: required: an integer determining the sequence length.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Fixed` sparsity layout of all head
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_local_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
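    # Hedged illustration (not in the original source): with the defaults block=16,
    # num_local_blocks=4, num_global_blocks=1 and bidirectional attention, a length-128
    # sequence gives an 8x8 block mask per head,
    #   FixedSparsityConfig(num_heads=1).make_layout(128)
    # consisting of two dense 4x4 diagonal local windows plus fully-set global block
    # columns 3 and 7 (0-indexed), i.e. the last block of each local window.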
class VariableSparsityConfig(SparsityConfig):
"""Configuration class to store `Variable` sparsity configuration.
This layout is an extension of FixedSparsityConfig in which:
    - user can set a random layout; the default value is zero, meaning no random blocks
- user can provide a list of local block sizes
- user can provide a list of global block indices.
For more details about `Fixed` sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
    This class extends the parent class `SparsityConfig` and customizes it for `Variable` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_random_blocks=0,
local_window_blocks=[4],
global_block_indices=[0],
global_block_end_indices=None,
attention='bidirectional',
horizontal_global_attention=False):
"""Initialize `Variable` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
            block: optional: an integer determining the block size. The current implementation of sparse self-attention is based on blocked sparse matrices, in which this parameter defines the size of such blocks, `Block X Block`.
            different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability. Currently this sparsity config can only assign a single layout to all heads; it needs to be extended to support a different layout per head.
num_random_blocks: optional: an integer determining the number of random blocks in each block row.
            local_window_blocks: optional: a list of integers determining the number of blocks in each local attention window. It assumes the first number determines the number of blocks in the first local window, the second number the second window, and so on, while the last number determines the number of blocks in all remaining local windows.
global_block_indices: optional: a list of integers determining which blocks are considered as global attention. Given indices, determine the blocks that all other token blocks attend to and they attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window.
global_block_end_indices: optional: a list of integers determining end indices of global window blocks. By default this is not used. But if it is set, it must have the same size of global_block_indices parameter, and combining this two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention.
num_global_blocks: optional: an integer determining how many consecutive blocks in a local window is used as the representative of the window for global attention.
attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure.
horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_random_blocks = num_random_blocks
self.local_window_blocks = local_window_blocks
self.global_block_indices = global_block_indices
if (global_block_end_indices is not None):
if (len(global_block_indices) != len(global_block_end_indices)):
raise ValueError(
f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!'
)
for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)):
if start_idx >= end_idx:
raise ValueError(
f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!'
)
self.global_block_end_indices = global_block_end_indices
if (attention != 'unidirectional' and attention != 'bidirectional'):
raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
self.attention = attention
if (attention != 'bidirectional' and horizontal_global_attention):
raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!')
self.horizontal_global_attention = horizontal_global_attention
def set_random_layout(self, h, layout):
"""Sets random attention layout used by the given head in the sparse attention.
Note) By default, it assumes there will be a unique random block layout for all heads, unless the `different_layout_per_head` parameter is set, in which case each head can have a different random layout.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_random_blocks):
raise ValueError(
f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
for row in range(0, num_blocks):
rnd_cols = random.sample(range(0, num_blocks), self.num_random_blocks)
layout[h, row, rnd_cols] = 1
return layout
def set_local_layout(self, h, layout):
"""Sets local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set
"""
num_blocks = layout.shape[1]
start_block_idx = 0
end_block_idx = 0
for block_size in self.local_window_blocks:
end_block_idx += block_size
end_block_idx = min(end_block_idx, num_blocks)
for row in range(start_block_idx, end_block_idx):
for col in range(start_block_idx, (row + 1 if self.attention == 'unidirectional' else end_block_idx)):
layout[h, row, col] = 1
start_block_idx += block_size
# if any rows remain uncovered, reuse the last local window block size as the local window for the remaining applicable local windows
for i in range(start_block_idx, num_blocks, block_size):
end_block_idx = min(i + block_size, num_blocks)
for row in range(i, end_block_idx):
for col in range(i, (row + 1 if self.attention == 'unidirectional' else end_block_idx)):
layout[h, row, col] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
if (self.global_block_end_indices is None):
for idx in self.global_block_indices:
# if global block idx is in the range of the sequence blocks
if (idx < num_blocks):
#global rows
if (self.horizontal_global_attention):
layout[h, idx, :] = 1
#global columns
first_row = 0 if self.attention == 'bidirectional' else idx
layout[h, first_row:, idx] = 1
else:
for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)):
# if global block idx is in the range of the sequence blocks
if (start_idx < num_blocks):
end_idx = min(end_idx, num_blocks)
#global rows
if (self.horizontal_global_attention):
layout[h, start_idx:end_idx, :] = 1
#global columns
first_row = 0 if self.attention == 'bidirectional' else start_idx
layout[h, first_row:, start_idx:end_idx] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Variable` sparsity layout used by each head in the sparse attention.
Arguments:
seq_len: required: an integer determining the sequence length of the model input.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Variable` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_random_layout(h, layout)
layout = self.set_local_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
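# Illustrative usage sketch (added for clarity, not part of the original file): a `Variable`
# layout with two explicit local window sizes and two global block columns; values are placeholders.
def _example_variable_layout():
    config = VariableSparsityConfig(num_heads=4,
                                    block=16,
                                    local_window_blocks=[2, 4],
                                    global_block_indices=[0, 8])
    return config.make_layout(seq_len=1024)  # (4, 64, 64) block mask of 0/1 flags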
class BigBirdSparsityConfig(SparsityConfig):
"""Configuration class to store `BigBird` sparsity configuration.
For more details about this sparsity config, please see `Big Bird: Transformers for Longer Sequences`: https://arxiv.org/pdf/2007.14062.pdf
This class extends parent class of `SparsityConfig` and customizes it for `BigBird` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_random_blocks=1,
num_sliding_window_blocks=3,
num_global_blocks=1,
attention='bidirectional'):
"""Initialize the BigBird Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_random_blocks: optional: an integer determining the number of random blocks in each block row.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
num_global_blocks: optional: an integer determining how many consecutive blocks, starting from index 0, are considered as global attention. Global block tokens will be attended by all other block tokens and will attend to all other block tokens as well.
attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens that appear before them in the context. Considering that, the upper triangular part of the attention matrix is empty. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is a mirror of the lower triangular part.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_random_blocks = num_random_blocks
self.num_sliding_window_blocks = num_sliding_window_blocks
self.num_global_blocks = num_global_blocks
if (attention != 'unidirectional' and attention != 'bidirectional'):
raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
self.attention = attention
def set_random_layout(self, h, layout):
"""Sets random attention layout used by the given head in the sparse attention.
Note) By default, it assumes there will be a unique random block layout for all heads, unless the `different_layout_per_head` parameter is set, in which case each head can have a different random layout.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_random_blocks):
raise ValueError(
f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
for row in range(0, num_blocks):
sample_range = range(0, num_blocks) if self.attention == 'bidirectional' else range(0, row + 1)
rnd_cols = random.sample(sample_range, self.num_random_blocks)
layout[h, row, rnd_cols] = 1
return layout
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_sliding_window_blocks):
raise ValueError(
f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout_itc(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_global_blocks):
raise ValueError(
f'Number of global blocks, {self.num_global_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
#global rows
layout[h, 0:self.num_global_blocks, :] = 1
#global columns
layout[h, :, 0:self.num_global_blocks] = 1
if self.attention == 'unidirectional':
# zero out anything attending to the future
layout = torch.tril(layout)
return layout
def make_layout(self, seq_len):
"""Generates `BigBird` sparsity layout used by each head in the sparse attention.
Arguments:
seq_len: required: an integer determining the sequence length of the model input.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `BigBird` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_random_layout(h, layout)
layout = self.set_sliding_window_layout(h, layout)
layout = self.set_global_layout_itc(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
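# Illustrative usage sketch (added for clarity, not part of the original file): the `BigBird`
# layout is the union of the random, sliding-window and global patterns set by the methods above.
def _example_bigbird_layout():
    config = BigBirdSparsityConfig(num_heads=4,
                                   block=16,
                                   num_random_blocks=2,
                                   num_sliding_window_blocks=3,
                                   num_global_blocks=1)
    return config.make_layout(seq_len=1024)  # (4, 64, 64) block mask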
class BSLongformerSparsityConfig(SparsityConfig):
"""Configuration class to store edited `Longformer` sparsity configuration.
Note) this is a block-sparse version of the Longformer, which differs slightly from the original Longformer (the original uses element-wise sparsity).
For more details about this sparsity config, please see `Longformer: The Long-Document Transformer`: https://arxiv.org/pdf/2004.05150.pdf
This class extends parent class of `SparsityConfig` and customizes it for `Longformer` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_sliding_window_blocks=3,
global_block_indices=[0],
global_block_end_indices=None,
attention='bidirectional'):
"""Initialize the edited `Longformer` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
global_block_indices: optional: a list of integers determining which blocks are considered as global attention. The blocks at these indices attend to all other token blocks and all other token blocks attend to them. Default value is only index 0. Notice that if the global_block_end_indices parameter is set, this parameter is used as the starting index of each global window.
global_block_end_indices: optional: a list of integers determining end indices of global window blocks. By default this is not used. But if it is set, it must have the same size as the global_block_indices parameter; combining these two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention.
attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens that appear before them in the context. Considering that, the upper triangular part of the attention matrix is empty. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is a mirror of the lower triangular part.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_sliding_window_blocks = num_sliding_window_blocks
self.global_block_indices = global_block_indices
self.attention = attention
if (global_block_end_indices is not None):
if (len(global_block_indices) != len(global_block_end_indices)):
raise ValueError(
f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!'
)
for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)):
if start_idx >= end_idx:
raise ValueError(
f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!'
)
self.global_block_end_indices = global_block_end_indices
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_sliding_window_blocks):
raise ValueError(
f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
if (self.global_block_end_indices is None):
for idx in self.global_block_indices:
# if global block idx is in the range of the sequence blocks
if (idx < num_blocks):
#global rows
layout[h, idx, :] = 1
#global columns
layout[h, :, idx] = 1
else:
for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)):
# if global block idx is in the range of the sequence blocks
if (start_idx < num_blocks):
end_idx = min(end_idx, num_blocks)
#global rows
layout[h, start_idx:end_idx, :] = 1
#global columns
layout[h, :, start_idx:end_idx] = 1
if self.attention == 'unidirectional':
layout = torch.tril(layout)
return layout
def make_layout(self, seq_len):
"""Generates edited `Longformer` sparsity layout used by each head in the sparse attention.
Arguments:
seq_len: required: an integer determining the sequence length of the model input.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `BSLongformer` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_sliding_window_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
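# Illustrative usage sketch (added for clarity, not part of the original file): a block-sparse
# Longformer layout with a sliding window plus two global windows given as (start, end) pairs.
def _example_bslongformer_layout():
    config = BSLongformerSparsityConfig(num_heads=4,
                                        block=16,
                                        num_sliding_window_blocks=5,
                                        global_block_indices=[0, 32],
                                        global_block_end_indices=[2, 34])
    return config.make_layout(seq_len=1024)  # (4, 64, 64) block mask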
class LocalSlidingWindowSparsityConfig(SparsityConfig):
"""Configuration class to store `Local Sliding Window` sparsity configuration - a purely-local sliding window attention.
This class extends parent class of `SparsityConfig` and customizes it for `Local` sparsity.
"""
def __init__(self, num_heads, block=16, num_sliding_window_blocks=3, attention='unidirectional'):
"""Initialize the Local Sliding Window Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens that appear before them in the context. Considering that, the upper triangular part of the attention matrix is empty. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is a mirror of the lower triangular part.
"""
super().__init__(num_heads, block)
self.num_sliding_window_blocks = num_sliding_window_blocks
self.attention = attention
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_sliding_window_blocks):
raise ValueError(
f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks) if self.attention == "bidirectional" else row + 1
layout[h, row, start:end] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Local Sliding Window` sparsity layout used by each head in the sparse attention.
Arguments:
seq_len: required: an integer determining the sequence length of the model input.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Local Sliding Window` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_sliding_window_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
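# Illustrative usage sketch (added for clarity, not part of the original file): a purely local,
# causal sliding-window layout; each block row attends to itself and the block to its left.
def _example_local_sliding_window_layout():
    config = LocalSlidingWindowSparsityConfig(num_heads=4,
                                              block=16,
                                              num_sliding_window_blocks=3,
                                              attention='unidirectional')
    return config.make_layout(seq_len=512)  # (4, 32, 32) lower-banded block mask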
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
# https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
import torch
import triton
import triton.language as tl
def next_power_of_2(n):
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
return n
def num_warps(n):
if n < 512:
return 4
if n < 2048:
return 8
return 16
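# For reference (added for clarity, not part of the original file): next_power_of_2 rounds its
# argument up to the nearest power of two by smearing the high bit (e.g. 300 -> 512, 512 -> 512),
# and num_warps maps the softmax reduction width to a Triton launch width
# (e.g. 256 -> 4 warps, 1024 -> 8, 4096 -> 16).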
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[6] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[6] * meta['BLOCK'])})
@triton.jit
def _forward(X, scale, LUT, RPE, KP_M, ATTN_M, sizemax, stride_zx, stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm,
stride_zattnm, **meta):
TN = meta['TN']
BLOCK = meta['BLOCK']
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from LUT
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# block id and column id
blockid = tl.load(LUT + offset + rbmn * 4 + 0)
columnid = tl.load(LUT + offset + rbmn * 4 + 1)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
# pointers to X
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
x = tl.load(px, mask=check, other=-float('inf'))
x = x.to(tl.float32)
# apply scale
if meta['APPLY_SCALE']:
x = x * scale
# apply RPE
if meta['APPLY_RPE']:
prpe = RPE + pidz * stride_zrpe + headid * stride_hrpe + columnid * BLOCK + rowid * BLOCK * stride_srpe + rxm * stride_srpe + rxn
rpe = tl.load(prpe, mask=check, other=0)
x = x + rpe
# apply key-padding mask
if meta['APPLY_KP_MASK']:
pkp_m = KP_M + pidz * stride_zkpm + columnid * BLOCK + rxn
kp_m = tl.load(pkp_m, mask=check, other=-float('inf'))
if meta['KP_MASK_MUL']:
kp_m = tl.where(kp_m == 0, -float('inf'), 0.)
x = x + kp_m
# apply attention mask
if meta['APPLY_ATTN_MASK']:
pattn_m = ATTN_M + columnid * BLOCK + rowid * BLOCK * stride_zattnm + rxm * stride_zattnm + rxn
attn_m = tl.load(pattn_m, mask=check, other=-float('inf'))
if meta['ATTN_MASK_MUL']:
attn_m = tl.where(attn_m == 0, -float('inf'), 0.)
x = x + attn_m
# computation
x = tl.softmax(x)
tl.store(px, x, mask=check)
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[4] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[4]) * meta['BLOCK']})
@triton.jit
def _backward(X, scale, DX, LUT, sizemax, stride_zx, stride_zdx, **meta):
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
TN = meta['TN']
BLOCK = meta['BLOCK']
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from look-up table
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
# bounds checking on lut
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# initialize pointers to block-sparse input
blockid = tl.load(LUT + offset + rbmn * 4)
X = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
DX = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
# compute fused softmax backward
x = tl.load(X, mask=check, other=0)
dx = tl.load(DX, mask=check, other=0)
x = x.to(tl.float32)
dx = dx.to(tl.float32)
y = x * (dx - tl.sum(x * dx, 0)) * scale
tl.store(DX, y, mask=check)
class _sparse_softmax(torch.autograd.Function):
bwd_kernels = dict()
@staticmethod
def make_lut(layout, block, device):
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
sizes = _empty.clone()
# sizes along rows
for h in range(layout.shape[0]):
sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
# offsets in block format
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
# block indices
idx = torch.arange(layout.sum())
head = layout.nonzero()[:, 0]
rows = layout.nonzero()[:, 1]
columns = layout.nonzero()[:, 2]
core = torch.stack((idx, columns, rows, head), dim=1).view(-1)
# construct look-up table
offsets = offsets * 4 + 2 * sizes.numel()
header = torch.stack((sizes, offsets), dim=1).view(-1)
lut = torch.cat((header, core)).type(torch.int32).to(device)
return lut, int(sizes.max())
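# Note (added for clarity, not part of the original file): the returned `lut` packs, for each
# block row, a (size, offset) header followed by one (block_id, column, row, head) record per
# non-zero block in `layout`; `maxlut` (= sizes.max()) is the widest block row and bounds the
# TN reduction width picked by the heuristics above.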
@staticmethod
def forward(ctx, x, scale, rpe, key_padding_mask, attn_mask, kp_mask_mode, attn_mask_mode, spdims, block, lut,
num_blocks, maxlut, bench, time):
apply_scale = False if scale == 1.0 else True
# handle None rpe
if rpe is None:
apply_rpe = False
stride_zrpe, stride_hrpe, stride_srpe = 0, 0, 0
rpe = torch.empty(0, dtype=x.dtype, device=x.device)
else:
apply_rpe = True
stride_zrpe, stride_hrpe, stride_srpe = rpe.stride(0), rpe.stride(1), rpe.stride(2)
# handle None key_padding_mask
if key_padding_mask is None:
apply_kp_mask = False
stride_zkpm = 0
key_padding_mask = torch.empty(0, dtype=x.dtype, device=x.device)
else:
apply_kp_mask = True
stride_zkpm = key_padding_mask.stride(0)
# handle None attention_mask
if attn_mask is None:
apply_attn_mask = False
stride_zattnm = 0
attn_mask = torch.empty(0, dtype=x.dtype, device=x.device)
else:
apply_attn_mask = True
stride_zattnm = attn_mask.stride(0)
# run kernel
M = x.shape[0]
meta = {
'BLOCK': block,
'APPLY_SCALE': apply_scale,
'APPLY_RPE': apply_rpe,
'APPLY_KP_MASK': apply_kp_mask,
'APPLY_ATTN_MASK': apply_attn_mask,
'KP_MASK_MUL': kp_mask_mode == 'mul',
'ATTN_MASK_MUL': attn_mask_mode == 'mul',
}
grid = lambda opt: [spdims[0] * spdims[1] * block, M]
_forward[grid](x, scale, lut, rpe, key_padding_mask, attn_mask, maxlut, x.stride(0),\
stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm, stride_zattnm, **meta)
# save to context
ctx.mark_dirty(x)
ctx.save_for_backward(x, lut)
ctx.spdims = spdims
ctx.block = block
ctx.maxlut = maxlut
ctx.scale = scale
ctx.apply_scale = apply_scale
ctx.apply_rpe = apply_rpe
ctx.apply_kp_mask = apply_kp_mask
ctx.apply_attn_mask = apply_attn_mask
ctx.kp_mask_mode = kp_mask_mode
ctx.attn_mask_mode = attn_mask_mode
return x
@staticmethod
def backward(ctx, dx):
# retrieve from context
x, lut = ctx.saved_tensors
# run kernel
M = x.shape[0]
grid = lambda opt: [ctx.spdims[0] * ctx.spdims[1] * ctx.block, M]
_backward[grid](x, ctx.scale, dx, lut, ctx.maxlut, x.stride(0), dx.stride(0), BLOCK=ctx.block)
return dx, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class Softmax:
"""Block-Sparse Softmax class; this class computes softmax on a block sparse matrix. It is also able to apply either/all of the following masks:
- relative position embedding
- key padding mask
- attention mask
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
"""
def sparse_softmax(*args, **kwargs):
return _sparse_softmax.apply(*args, **kwargs)
def make_lut(self, device):
"""Generates the sparsity layout used in block-sparse softmax
"""
key = (device, )
if key not in self.lut_cache:
self.lut_cache[key] = _sparse_softmax.make_lut(self.layout, self.block, device)
return self.lut_cache[key]
def __init__(self, layout, block, bench=False):
"""Initialize the Block-Sparse Softmax class.
Arguments:
layout: required: sparsity layout tensor
block: required: an integer determining the block size.
bench: optional: set if you want to do benchmarking
"""
self.num_blocks = layout.sum().item()
self.spdims = layout.shape
self.layout = layout
self.block = block
self.bench = bench
self.lut_cache = dict()
def __call__(self,
x,
scale=1.,
rpe=None,
key_padding_mask=None,
attn_mask=None,
key_padding_mask_mode='add',
attn_mask_mode='add'):
"""Applies softmax on a Block-Sparse input tensor.
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
Arguments:
x: required: a block-sparse tensor to which softmax is applied; the computation is in place and the result is returned in the same tensor
scale: optional: a float value; x values will be multiplied by this value before normalization. Default value is 1.0.
rpe: optional: a tensor same dimension as x that is used as relative position embedding
key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
key_padding_mask_mode: optional: a string, either 'add' or 'mul', determining whether key_padding_mask is added to or multiplied into the attention scores
attn_mask_mode: optional: a string, either 'add' or 'mul', determining whether attn_mask is added to or multiplied into the attention scores
Return:
x: a block-sparse tensor contains normalized input x using softmax; and masks applied if given
"""
time_y = [None]
if rpe is not None and rpe.dtype != x.dtype:
raise ValueError('relative position embedding must be %s' % x.dtype)
if attn_mask is not None and attn_mask.dtype != x.dtype:
raise ValueError('Attention mask must be %s' % x.dtype)
if key_padding_mask is not None and key_padding_mask.dtype != x.dtype:
raise ValueError('Key padding mask must be %s' % x.dtype)
lut, maxlut = self.make_lut(x.device)
x = Softmax.sparse_softmax(x, scale, rpe, key_padding_mask, attn_mask, key_padding_mask_mode, attn_mask_mode,
self.spdims, self.block, lut, self.num_blocks, maxlut, self.bench, time_y)
self.time_y = time_y[0]
return x
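# Illustrative usage sketch (added for clarity, not part of the original file). The kernels above
# require a CUDA device with Triton installed; the block-sparse input is assumed to be stored as
# one (block x block) tile per non-zero layout entry, i.e. shape (batch, nnz_blocks, block, block).
def _example_block_sparse_softmax():
    block = 16
    layout = torch.tril(torch.ones(1, 8, 8, dtype=torch.int64))  # 1 head, causal 8x8 block layout
    softmax = Softmax(layout, block)
    x = torch.randn(2, int(layout.sum()), block, block, dtype=torch.float16, device='cuda')
    return softmax(x, scale=0.125)  # softmax is applied in place on x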
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
import os
def _build_file_index(directory, suffix='.tr'):
"""Build an index of source files and their basenames in a given directory.
Args:
directory (string): the directory to index
suffix (string): index files with this suffix
Returns:
list: A list of tuples of the form [(basename, absolute path), ...]
"""
index = []
for fname in os.listdir(directory):
if fname.endswith(suffix):
basename = fname[:fname.rfind(suffix)] # strip the suffix
path = os.path.join(directory, fname)
index.append((basename, path))
return index
# Go over all local source files and parse them as strings
_module = sys.modules[_build_file_index.__module__]
_directory = os.path.dirname(os.path.realpath(__file__))
for name, fname in _build_file_index(_directory):
with open(fname, 'r') as fin:
setattr(_module, name, fin.read())
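# Example (added for clarity, not part of the original file): for a directory containing
# "softmax.tr" and "matmul.tr", _build_file_index returns entries like
# [('softmax', '/abs/path/softmax.tr'), ('matmul', '/abs/path/matmul.tr')], and the loop above
# then exposes each file's contents as module attributes named `softmax` and `matmul`.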
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from .inference.config import DeepSpeedInferenceConfig
from ...model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
from .inference.moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
import math
import torch
from torch import nn
from torch.autograd import Function
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import TransformerBuilder, StochasticTransformerBuilder
# Cuda modules will be imported if needed
transformer_cuda_module = None
stochastic_transformer_cuda_module = None
class TransformerConfig():
def __init__(self, batch_size, hidden_size, intermediate_size, heads, attn_dropout_ratio, hidden_dropout_ratio,
num_hidden_layers, initializer_range):
self.layer_id = -1
self.batch_size = batch_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.heads = heads
self.attn_dropout_ratio = attn_dropout_ratio
self.hidden_dropout_ratio = hidden_dropout_ratio
self.num_hidden_layers = num_hidden_layers
self.initializer_range = initializer_range
class DeepSpeedTransformerConfig(TransformerConfig):
"""Initialize the DeepSpeed Transformer Config.
Arguments:
batch_size: The maximum batch size used for running the kernel on each GPU
hidden_size: The hidden size of the transformer layer
intermediate_size: The intermediate size of the feed-forward part of transformer layer
heads: The number of heads in the self-attention of the transformer layer
attn_dropout_ratio: The ratio of dropout for the attention's output
hidden_dropout_ratio: The ratio of dropout for the transformer's output
num_hidden_layers: The number of transformer layers
initializer_range: BERT model's initializer range for initializing parameter data
local_rank: Optional: The rank of the GPU running the transformer kernel. It is not required
if the model has already set the current device; otherwise it needs to be set
so that the transformer kernel can work on the right device
seed: The random seed for the dropout layers
fp16: Enable half-precision computation
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
normalize_invertible: Optional: Enable invertible LayerNorm execution (dropping the input activation),
default is False
gelu_checkpoint: Optional: Enable checkpointing of Gelu activation output to save memory,
default is False
adjust_init_range: Optional: Set as True (default) if the model adjusts the weight initial values of
its self-attention output and layer output, False keeps the initializer_range no change.
See the adjustment below:
output_std = self.config.initializer_range / math.sqrt(2.0 * num_layers)
attn_dropout_checkpoint: Optional: Enable checkpointing of attention dropout to save memory,
default is False
stochastic_mode: Enable for higher performance. Please note that this flag introduces some
non-determinism and can produce different results on different runs. However, we have seen
that enabling it does not affect pretraining tasks such as BERT, which can still reach
a high accuracy level. On the other hand, for downstream tasks such as fine-tuning, we recommend
turning it off in order to be able to reproduce the same result through the regular kernel execution.
return_tuple: Enable if using the return_tuple interface style for sending out the forward results.
training: Enable for training rather than inference.
"""
def __init__(self,
batch_size=-1,
hidden_size=-1,
intermediate_size=-1,
heads=-1,
attn_dropout_ratio=-1,
hidden_dropout_ratio=-1,
num_hidden_layers=-1,
initializer_range=-1,
layer_norm_eps=1e-12,
local_rank=-1,
seed=-1,
fp16=False,
pre_layer_norm=True,
normalize_invertible=False,
gelu_checkpoint=False,
adjust_init_range=True,
attn_dropout_checkpoint=False,
stochastic_mode=False,
return_tuple=False,
training=True):
super(DeepSpeedTransformerConfig,
self).__init__(batch_size, hidden_size,
(intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
attn_dropout_ratio, hidden_dropout_ratio, num_hidden_layers, initializer_range)
self.fp16 = fp16
self.pre_layer_norm = pre_layer_norm
self.local_rank = local_rank
self.seed = seed
self.normalize_invertible = normalize_invertible
self.gelu_checkpoint = gelu_checkpoint # True: if higher batch size is required
self.adjust_init_range = adjust_init_range
self.test_gemm = False
self.layer_norm_eps = layer_norm_eps
self.training = training
self.is_grad_enabled = True
self.attn_dropout_checkpoint = attn_dropout_checkpoint
self.stochastic_mode = stochastic_mode
self.return_tuple = return_tuple
@classmethod
def from_dict(cls, json_object):
config = DeepSpeedTransformerConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-16') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
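# Illustrative sketch (added for clarity, not part of the original file): build a config from a
# plain dict; the keys mirror the constructor arguments documented above and are placeholders.
def _example_config_from_dict():
    return DeepSpeedTransformerConfig.from_dict({
        "batch_size": 8,
        "hidden_size": 1024,
        "heads": 16,
        "num_hidden_layers": 24,
        "initializer_range": 0.02,
        "fp16": True,
    })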
class DeepSpeedTransformerFunction(Function):
@staticmethod
def forward(ctx, input, input_mask, self, grads, layer_id, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw,
attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b, config):
cuda_module = stochastic_transformer_cuda_module if config.stochastic_mode else transformer_cuda_module
forward_func = cuda_module.forward_fp16 if config.fp16 else cuda_module.forward_fp32
inp_size = input.size()
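# The CUDA kernel requires the sequence length to be a multiple of 16; shorter inputs are padded
# here (random values plus a large negative mask bias) and the output is narrowed back to the
# original length before returning.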
if inp_size[1] % 16 != 0:
input = torch.cat(
(input,
torch.randn(
(inp_size[0], (16 - (inp_size[1] % 16)), inp_size[2]), device=input.device, dtype=input.dtype)),
1)
input_mask = torch.cat((input_mask, torch.ones((inp_size[0], input_mask.shape[1], input_mask.shape[2], \
(16 - (inp_size[1] % 16))), device=input_mask.device, dtype=input_mask.dtype) * -10000), 3)
(output, inp_norm, qkv_tf, soft_inp, ctx_bufB, attn_o_inp, add_res, ff1_inp, gelu_inp, ff2_inp,
attn_prob_dropout_mask, attn_output_dropout_mask, layer_output_dropout_mask, attn_layer_norm_var,
attn_layer_norm_mean, layer_norm_var, layer_norm_mean) = forward_func(
config.layer_id, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w,
inter_b, output_w, output_b, norm_w, norm_b, config.training and config.is_grad_enabled,
config.pre_layer_norm, config.attn_dropout_checkpoint, config.normalize_invertible,
config.gelu_checkpoint)
# For testing only.
if grads is not None:
for i in [2]:
attn_qkvw.register_hook(lambda x, i=i, self=self: grads.append([
x[i * attn_ow.size(0):(i + 1) * attn_ow.size(0)], ("Q_W" if i == 0 else "K_W" if i == 1 else "V_W")
]))
for i in [2]:
attn_qkvb.register_hook(lambda x, i=i, self=self: grads.append([
x[i * attn_ow.size(0):(i + 1) * attn_ow.size(0)], ("Q_B" if i == 0 else "K_B" if i == 1 else "V_B")
]))
attn_ow.register_hook(lambda x, self=self: grads.append([x, "O_W"]))
attn_ob.register_hook(lambda x, self=self: grads.append([x, "O_B"]))
attn_nw.register_hook(lambda x, self=self: grads.append([x, "N2_W"]))
attn_nb.register_hook(lambda x, self=self: grads.append([x, "N2_B"]))
inter_w.register_hook(lambda x, self=self: grads.append([x, "int_W"]))
inter_b.register_hook(lambda x, self=self: grads.append([x, "int_B"]))
output_w.register_hook(lambda x, self=self: grads.append([x, "out_W"]))
output_b.register_hook(lambda x, self=self: grads.append([x, "out_B"]))
norm_w.register_hook(lambda x, self=self: grads.append([x, "norm_W"]))
norm_b.register_hook(lambda x, self=self: grads.append([x, "norm_B"]))
if config.is_grad_enabled and config.training:
if (config.pre_layer_norm and config.normalize_invertible):
ctx.save_for_backward(input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w,
inter_b, output_w, output_b, norm_w, norm_b)
else:
ctx.save_for_backward(output, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw,
attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b)
ctx.config = config
if (config.pre_layer_norm or not config.normalize_invertible):
ctx.inp_norm = inp_norm
ctx.qkv_tf = qkv_tf
ctx.soft_inp = soft_inp
if not config.attn_dropout_checkpoint:
ctx.ctx_bufB = ctx_bufB
ctx.attn_o_inp = attn_o_inp
if not config.normalize_invertible:
ctx.add_res = add_res
ctx.attn_layer_norm_mean = attn_layer_norm_mean
ctx.layer_norm_mean = layer_norm_mean
ctx.ff1_inp = ff1_inp
if not config.gelu_checkpoint:
ctx.gelu_inp = gelu_inp
ctx.ff2_inp = ff2_inp
ctx.attn_prob_dropout_mask = attn_prob_dropout_mask
ctx.attn_output_dropout_mask = attn_output_dropout_mask
ctx.layer_output_dropout_mask = layer_output_dropout_mask
ctx.attn_layer_norm_var = attn_layer_norm_var
ctx.layer_norm_var = layer_norm_var
if inp_size[1] % 16 != 0:
output = torch.narrow(output, 1, 0, inp_size[1])
if config.return_tuple:
return (output, ) # outputs -> (output) : outputs[0] = output
else:
return output
@staticmethod
def backward(ctx, grad_output):
bsz = grad_output.shape[0]
grad_output_shape = grad_output.size()
if grad_output_shape[1] % 16 != 0:
grad_output = torch.cat((grad_output, torch.zeros((bsz, (16 - (grad_output_shape[1] % 16)), \
grad_output_shape[2]), device=grad_output.device, dtype=grad_output.dtype)), 1)
assert ctx.config.training
if (ctx.config.pre_layer_norm and ctx.config.normalize_invertible):
(input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b, output_w,
output_b, norm_w, norm_b) = ctx.saved_tensors
else:
(output, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b,
output_w, output_b, norm_w, norm_b) = ctx.saved_tensors
cuda_module = stochastic_transformer_cuda_module if ctx.config.stochastic_mode else transformer_cuda_module
backward_func = cuda_module.backward_fp16 if ctx.config.fp16 else cuda_module.backward_fp32
(grad_input, grad_attn_qkvw, grad_attn_qkvb, grad_attn_ow, grad_attn_ob, grad_attn_nw, grad_attn_nb,
grad_inter_w, grad_inter_b, grad_output_w, grad_output_b, grad_norm_w, grad_norm_b) = backward_func(
ctx.config.layer_id, grad_output,
(ctx.inp_norm if (ctx.config.pre_layer_norm and ctx.config.normalize_invertible) else output),
(ctx.inp_norm if (ctx.config.pre_layer_norm or not ctx.config.normalize_invertible) else input),
ctx.qkv_tf, ctx.soft_inp, (ctx.soft_inp if ctx.config.attn_dropout_checkpoint else ctx.ctx_bufB),
ctx.attn_o_inp, (ctx.ff1_inp if ctx.config.normalize_invertible else ctx.add_res), ctx.ff1_inp,
(ctx.ff2_inp if ctx.config.gelu_checkpoint else ctx.gelu_inp), ctx.ff2_inp, ctx.attn_prob_dropout_mask,
ctx.attn_output_dropout_mask, ctx.layer_output_dropout_mask, ctx.attn_layer_norm_var,
ctx.attn_layer_norm_mean, ctx.layer_norm_var, ctx.layer_norm_mean,
(ctx.inp_norm if
(ctx.config.pre_layer_norm and ctx.config.normalize_invertible) else input), input_mask, attn_qkvw,
attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b)
# This appears to be an effective way to release context memory
ctx.qkv_tf = None
ctx.soft_inp = None
ctx.ctx_bufB = None
ctx.gelu_inp = None
ctx.ff2_inp = None
ctx.attn_o_inp = None
ctx.ff1_inp = None
ctx.add_res = None
ctx.inp_norm = None
ctx.config = None
ctx.attn_layer_norm_mean = None
ctx.layer_norm_mean = None
ctx.attn_prob_dropout_mask = None
ctx.attn_output_dropout_mask = None
ctx.layer_output_dropout_mask = None
ctx.attn_layer_norm_var = None
ctx.layer_norm_var = None
if grad_output_shape[1] % 16 != 0:
grad_input = torch.narrow(grad_input, 1, 0, grad_output_shape[1])
return (grad_input, None, None, None, None, grad_attn_qkvw, grad_attn_qkvb, grad_attn_ow, grad_attn_ob,
grad_attn_nw, grad_attn_nb, grad_inter_w, grad_inter_b, grad_output_w, grad_output_b, grad_norm_w,
grad_norm_b, None)
class DeepSpeedTransformerLayer(nn.Module):
"""Initialize the DeepSpeed Transformer Layer.
Static variable:
layer_id: The layer-index counter starting from 0 and incrementing by 1 every time a layer object is instantiated,
e.g. if a model has 24 transformer layers, layer_id goes from 0 to 23.
Arguments:
config: An object of DeepSpeedTransformerConfig
initial_weights: Optional: Only used for unit test
initial_biases: Optional: Only used for unit test
"""
layer_id = 0
def __init__(self, config, initial_weights=None, initial_biases=None):
super(DeepSpeedTransformerLayer, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedTransformerLayer.layer_id
DeepSpeedTransformerLayer.layer_id = DeepSpeedTransformerLayer.layer_id + 1
print("DeepSpeed Transformer config is ", self.config.__dict__)
if self.config.local_rank >= 0:
get_accelerator().set_device(self.config.local_rank)
if initial_weights is None and initial_biases is None:
self.attn_qkvw = nn.Parameter(torch.Tensor(self.config.hidden_size * 3, self.config.hidden_size))
self.attn_qkvb = nn.Parameter(torch.Tensor(self.config.hidden_size * 3))
self.attn_ow = nn.Parameter(torch.Tensor(self.config.hidden_size, self.config.hidden_size))
self.attn_ob = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.inter_w = nn.Parameter(torch.Tensor(self.config.intermediate_size, self.config.hidden_size))
self.inter_b = nn.Parameter(torch.Tensor(self.config.intermediate_size))
self.output_w = nn.Parameter(torch.Tensor(self.config.hidden_size, self.config.intermediate_size))
self.output_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_w = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.init_transformer_weights(self.config.adjust_init_range)
else:
# For testing only.
q = initial_weights[0].data
k = initial_weights[1].data
v = initial_weights[2].data
self.attn_qkvw = nn.Parameter(torch.cat((q, k, v)))
#self.attn_qkvw[i * self.config.hidden_size:(i + 1) * self.config.hidden_size] = \
# initial_weights[i].clone()
#torch.empty_like(initial_weights[i]).data.copy_(initial_weights[i].data)
self.attn_qkvb = nn.Parameter(torch.Tensor(self.config.hidden_size * 3))
self.attn_qkvb.data.zero_()
self.attn_ow = initial_weights[3]
self.attn_ob = initial_biases[3]
self.attn_nw = initial_weights[4]
self.attn_nb = initial_biases[4]
self.inter_w = initial_weights[5]
self.inter_b = initial_biases[5]
self.output_w = initial_weights[6]
self.output_b = initial_biases[6]
self.norm_w = initial_weights[7]
self.norm_b = initial_biases[7]
# Load cuda modules if needed
global transformer_cuda_module, stochastic_transformer_cuda_module
if transformer_cuda_module is None and not self.config.stochastic_mode:
transformer_cuda_module = TransformerBuilder().load()
if stochastic_transformer_cuda_module is None and self.config.stochastic_mode:
stochastic_transformer_cuda_module = StochasticTransformerBuilder().load()
# create the layer in cuda kernels.
cuda_module = stochastic_transformer_cuda_module if self.config.stochastic_mode else transformer_cuda_module
create_layer_func = cuda_module.create_transformer_layer_fp16 if self.config.fp16 else cuda_module.create_transformer_layer_fp32
create_layer_func(self.config.layer_id, self.config.batch_size, self.config.hidden_size, self.config.heads,
self.config.intermediate_size, self.config.attn_dropout_ratio,
self.config.hidden_dropout_ratio, self.config.layer_norm_eps, self.config.seed,
self.config.pre_layer_norm, self.config.test_gemm, self.config.attn_dropout_checkpoint,
self.config.normalize_invertible, self.config.gelu_checkpoint, self.config.stochastic_mode)
def init_transformer_weights(self, adjust_init_range=False):
num_layers = self.config.num_hidden_layers
output_std = self.config.initializer_range
if adjust_init_range and self.config.local_rank == 0:
print("Accounting for accumulation on the residual path")
output_std = self.config.initializer_range / math.sqrt(2.0 * num_layers)
self.attn_qkvw.data.normal_(mean=0.0, std=self.config.initializer_range)
self.attn_qkvb.data.zero_()
self.attn_ow.data.normal_(mean=0.0, std=output_std)
self.attn_ob.data.zero_()
self.attn_nw.data.fill_(1.0)
self.attn_nb.data.zero_()
self.inter_w.data.normal_(mean=0.0, std=self.config.initializer_range)
self.inter_b.data.zero_()
self.output_w.data.normal_(mean=0.0, std=output_std)
self.output_b.data.zero_()
self.norm_w.data.fill_(1.0)
self.norm_b.data.zero_()
def forward(self,
hidden_states,
attention_mask=None,
head_mask=None,
layer_head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
grads=None):
self.config.is_grad_enabled = torch.is_grad_enabled()
self.config.training = self.training
return DeepSpeedTransformerFunction.apply(hidden_states, attention_mask, self, grads, self.config.layer_id,
self.attn_qkvw, self.attn_qkvb, self.attn_ow, self.attn_ob,
self.attn_nw, self.attn_nb, self.inter_w, self.inter_b,
self.output_w, self.output_b, self.norm_w, self.norm_b, self.config)
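# Illustrative usage sketch (added for clarity, not part of the original file). Instantiating the
# layer loads the DeepSpeed transformer CUDA kernels via TransformerBuilder, so a CUDA-capable
# environment is assumed; all hyper-parameters below are placeholders.
def _example_transformer_layer():
    config = DeepSpeedTransformerConfig(batch_size=8,
                                        hidden_size=1024,
                                        intermediate_size=4096,
                                        heads=16,
                                        attn_dropout_ratio=0.1,
                                        hidden_dropout_ratio=0.1,
                                        num_hidden_layers=24,
                                        initializer_range=0.02,
                                        fp16=True)
    device = get_accelerator().current_device_name()
    layer = DeepSpeedTransformerLayer(config).to(device).half()
    hidden = torch.randn(8, 128, 1024, dtype=torch.float16, device=device)
    mask = torch.zeros(8, 1, 1, 128, dtype=torch.float16, device=device)
    return layer(hidden, mask)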
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
from torch.autograd import Function
import torch.nn as nn
from packaging import version as pkg_version
from deepspeed.utils.logging import log_dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
# Cuda modules will be imported if needed
inference_module = None
minus_inf = -10000.0
triton_flash_attn = None
def load_triton_flash_attn():
global triton_flash_attn
try:
import triton
except ImportError:
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
if pkg_version.parse(triton.__version__) < pkg_version.parse("2.0"):
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
from .triton_ops import triton_flash_attn
class DeepSpeedDiffusersAttentionFunction(Function):
@staticmethod
def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb,
num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob,
do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel):
def _transpose_for_context(x):
x = x.permute(0, 2, 1, 3)
new_x_layer_shape = x.size()[:-2] + \
(hidden_size_per_partition,)
return x.reshape(*new_x_layer_shape)
def _transpose_for_scores(x):
attention_head_size = x.shape[-1] // num_attention_heads_per_partition
new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition, attention_head_size)
x = x.reshape(*new_x_shape)
x = x.permute(0, 2, 1, 3)
return x.contiguous()
def selfAttention_fp(input, context, input_mask):
if config.fp16 and input.dtype == torch.float32:
input = input.half()
head_size = input.shape[-1] // config.heads
do_flash_attn = (head_size <= 128)
scale = (1 / norm_factor) * (1 / norm_factor)
if do_flash_attn and context is None:
qkv_out = linear_func(input, attn_qkvw, attn_qkvb if attn_qkvb is not None else attn_qkvw, attn_qkvb
is not None, do_flash_attn, config.heads, False)
context_layer = triton_flash_attn_kernel(qkv_out[0], qkv_out[1], qkv_out[2], scale,
input.shape[-2] % 128 == 0)
context_layer = _transpose_for_context(context_layer[:, :, :, :head_size])
else:
do_flash_attn = False
if context is not None:
query = torch.matmul(input, attn_qw)
key = torch.matmul(context, attn_kw)
value = torch.matmul(context, attn_vw)
else:
qkv = torch.matmul(input, attn_qkvw)
query, key, value = qkv.chunk(3, dim=-1)
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn)
attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1)
context_layer = _transpose_for_context(torch.matmul(attention_scores, value))
output = linear_func(context_layer, attn_ow, attn_ob, do_out_bias, False, config.heads, False)
return output
output = selfAttention_fp(input, context, input_mask)
return output
@staticmethod
def backward(ctx, grad_output, grad_output1, grad_output2, grad_output3):
raise RuntimeError('You are running with DeepSpeed Inference mode. \
Please switch to Training mode for running backward!')
class DeepSpeedDiffusersAttention(nn.Module):
"""Initialize the DeepSpeed Transformer Layer.
Arguments:
layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
layer_id will be 0,1,2...23 when each layer object is instantiated
config: An object of DeepSpeedInferenceConfig
"""
layer_id = 0
def __init__(
self,
config,
):
super(DeepSpeedDiffusersAttention, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedDiffusersAttention.layer_id
DeepSpeedDiffusersAttention.layer_id += 1
device = get_accelerator().current_device_name() if config.bigscience_bloom else 'cpu'
qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
global inference_module
if inference_module is None:
builder = InferenceBuilder()
inference_module = builder.load()
if DeepSpeedDiffusersAttention.layer_id == 1:
log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0])
self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
qkv_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_kw = nn.Parameter(torch.empty(self.config.hidden_size,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_vw = nn.Parameter(torch.empty(self.config.hidden_size,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qw = nn.Parameter(torch.empty(self.config.hidden_size,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
out_size_per_partition = self.config.hidden_size // self.config.mp_size
self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.do_out_bias = True
if triton_flash_attn is None:
load_triton_flash_attn()
self.triton_flash_attn_kernel = triton_flash_attn()
self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
self.norm_factor = math.sqrt(math.sqrt(self.config.hidden_size // self.config.heads))
if self.config.scale_attn_by_inverse_layer_idx is True:
self.norm_factor *= math.sqrt(self.config.layer_id + 1)
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
if self.config.dtype in [torch.float16, torch.int8]:
self.score_context_func = inference_module.softmax_context_fp16
self.linear_func = inference_module.linear_layer_fp16
self.allocate_workspace = inference_module.allocate_workspace_fp16
else:
self.score_context_func = inference_module.softmax_context_fp32
self.linear_func = inference_module.linear_layer_fp32
self.allocate_workspace = inference_module.allocate_workspace_fp32
def forward(self, input, context=None, input_mask=None):
if self.config.layer_id == 0:
self.allocate_workspace(self.config.hidden_size, self.config.heads,
input.size()[1],
input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False,
0, self.config.max_out_tokens, self.config.min_out_tokens)
output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw,
self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb,
self.num_attention_heads_per_partition, self.norm_factor,
self.hidden_size_per_partition, self.attn_ow, self.attn_ob,
self.do_out_bias, self.score_context_func, self.linear_func,
self.triton_flash_attn_kernel)
return output
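# Editor's illustrative sketch (not part of the original source): a minimal, hedged
# example of constructing DeepSpeedDiffusersAttention from a DeepSpeedInferenceConfig.
# The hidden_size/heads/sequence values are hypothetical, the sibling config module is
# assumed importable, and the InferenceBuilder / triton kernels must be buildable on
# the local accelerator for this to actually run.
def _example_diffusers_attention():
    from .config import DeepSpeedInferenceConfig  # assumed sibling module
    cfg = DeepSpeedInferenceConfig(hidden_size=320, heads=8, dtype=torch.float16)
    attn = DeepSpeedDiffusersAttention(cfg)
    device = get_accelerator().current_device_name()
    hidden = torch.randn(2, 64, 320, dtype=torch.float16, device=device)
    # context/input_mask default to None, i.e. plain self-attention
    return attn(hidden)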
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
class Diffusers2DTransformerConfig():
def __init__(self, int8_quantization=False):
self.int8_quantization = int8_quantization
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
import torch
from deepspeed.utils.types import ActivationFuncType, NormType
class TransformerConfig():
def __init__(self, hidden_size, intermediate_size, heads, num_hidden_layers):
self.layer_id = -1
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.heads = heads
self.num_hidden_layers = num_hidden_layers
class DeepSpeedInferenceConfig(TransformerConfig):
"""Initialize the DeepSpeed Transformer Config.
Arguments:
hidden_size: The hidden size of the transformer layer
intermediate_size: The intermediate size of the feed-forward part of transformer layer
heads: The number of heads in the self-attention of the transformer layer
num_hidden_layers: The number of transformer layers
layer_norm_eps: The epsilon value for the layer norm
        local_rank: Optional: The rank of the GPU running the transformer kernel. It is not required
            if the model has already set the current device; otherwise it needs to be set so that
            the transformer kernel can work on the right device.
mp_size (optional): This argument is mainly used to create the parameters on the kernel side
using model-parallel architecture. If the client model already takes care of this, there is no
need to pass this argument.
        dtype: The data type used by the inference kernels (e.g. torch.float16, torch.bfloat16,
            torch.int8 or torch.float32).
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
        stochastic_mode: Enable for higher performance. Note that this flag has some level of
            non-determinism and can produce different results on different runs. However, we have seen
            that enabling it does not affect pretraining tasks such as BERT, which can still reach
            a high accuracy level. For downstream tasks such as fine-tuning, we recommend
            turning it off in order to reproduce the same result through the regular kernel execution.
scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation.
return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor
bigscience_bloom: This flag is added temporarily for supporting the BLOOM-176B model architecture.
"""
def __init__(self,
hidden_size=-1,
intermediate_size=-1,
heads=-1,
num_hidden_layers=-1,
layer_norm_eps=1e-12,
local_rank=-1,
mp_size=1,
dtype=torch.float16,
pre_layer_norm=True,
norm_type=NormType.LayerNorm,
stochastic_mode=False,
scale_attention=True,
triangular_masking=True,
local_attention=False,
window_size=256,
rotary_dim=-1,
rotate_half=False,
rotate_every_two=True,
return_tuple=True,
mlp_after_attn=True,
mlp_act_func_type=ActivationFuncType.GELU,
training_mp_size=1,
bigscience_bloom=False,
max_out_tokens=1024,
min_out_tokens=1,
enable_qkv_quantization=False,
use_mup=False,
scale_attn_by_inverse_layer_idx=False,
return_single_tuple=False,
set_empty_params=False,
transposed_mode=False):
super(DeepSpeedInferenceConfig,
self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
num_hidden_layers)
self.dtype = dtype
self.pre_layer_norm = pre_layer_norm
self.norm_type = norm_type
self.local_rank = local_rank
self.stochastic_mode = stochastic_mode
self.epsilon = layer_norm_eps
self.mp_size = mp_size
self.scale_attention = scale_attention
self.triangular_masking = triangular_masking
self.local_attention = local_attention
self.window_size = window_size
self.rotary_dim = rotary_dim
self.rotate_half = rotate_half
self.rotate_every_two = rotate_every_two
self.return_tuple = return_tuple
self.mlp_after_attn = mlp_after_attn
self.mlp_act_func_type = mlp_act_func_type
self.specialized_mode = False
self.training_mp_size = training_mp_size
self.bigscience_bloom = bigscience_bloom
self.max_out_tokens = max_out_tokens
self.min_out_tokens = min_out_tokens
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.enable_qkv_quantization = enable_qkv_quantization
self.use_mup = use_mup
self.return_single_tuple = return_single_tuple
self.set_empty_params = set_empty_params
self.transposed_mode = transposed_mode
@classmethod
def from_dict(cls, json_object):
config = DeepSpeedInferenceConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
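# Editor's illustrative sketch (not part of the original source): two equivalent ways of
# building a config, directly or via from_dict; the field values below are hypothetical.
def _example_inference_config():
    cfg = DeepSpeedInferenceConfig(hidden_size=1024, heads=16, num_hidden_layers=24, dtype=torch.float16)
    # from_dict copies arbitrary key/value pairs onto a default config
    from_dict_cfg = DeepSpeedInferenceConfig.from_dict({
        "hidden_size": 1024,
        "heads": 16,
        "num_hidden_layers": 24,
        "dtype": torch.float16
    })
    return cfg, from_dict_cfg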
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.utils.types import GATED_ACTIVATION_TYPES
from deepspeed.accelerator import get_accelerator
from .op_binding import MLPGemmOp, VectorMatMulOp, GELUGemmOp, ResidualAddOp
class DeepSpeedMLP(nn.Module):
_inter_w_buffers = []
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False):
super(DeepSpeedMLP, self).__init__()
self.config = config
data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype
data_type_fp = data_type
device = get_accelerator().current_device_name()
proj_factor = 2 if self.config.mlp_act_func_type in GATED_ACTIVATION_TYPES else 1
self.config.intermediate_size = self.config.intermediate_size if self.config.intermediate_size > 0 else 4 * self.config.hidden_size
self.intm_w_sz_per_partition = self.config.intermediate_size * proj_factor // self.config.mp_size
self.intm_o_sz_per_partition = self.config.intermediate_size // self.config.mp_size
if self.config.set_empty_params:
self.attn_nw = None
self.attn_nb = None
self.inter_w = None
self.inter_b = None
self.inter_up_w = None
self.inter_up_b = None
self.inter_gate_w = None
self.inter_gate_b = None
self.output_w = None
self.output_b = None
else:
self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.inter_w = nn.Parameter(torch.empty(self.config.hidden_size,
self.intm_w_sz_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.inter_b = nn.Parameter(torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
self.output_w = nn.Parameter(torch.empty(self.intm_o_sz_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
self.merge_count = int(math.log2(merge_count))
self.mp_group = mp_group
self.mlp_gemm_func = MLPGemmOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
self.fused_gemm_gelu = GELUGemmOp(config)
self.residual_add_func = ResidualAddOp(config)
if len(DeepSpeedMLP._inter_w_buffers) == 0:
DeepSpeedMLP._inter_w_buffers = [
torch.empty(self.config.hidden_size, self.intm_w_sz_per_partition, dtype=data_type, device=device),
torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device)
]
def _merge_inter_w(self):
inter_w = DeepSpeedMLP._inter_w_buffers[0]
inter_w[:self.intm_w_sz_per_partition, :] = self.inter_up_w # type: ignore
inter_w[self.intm_w_sz_per_partition:, :] = self.inter_gate_w # type: ignore
if self.inter_up_b is not None:
inter_b = DeepSpeedMLP._inter_w_buffers[1]
inter_b[:self.intm_w_sz_per_partition] = self.inter_up_b # type: ignore
inter_b[self.intm_w_sz_per_partition:] = self.inter_gate_b # type: ignore
return DeepSpeedMLP._inter_w_buffers
def forward(self, input, residual, residual_norm, bias):
if self.inter_w is None:
self._inter_w, self._inter_b = self._merge_inter_w()
else:
self._inter_w = self.inter_w
self._inter_b = self.inter_b
residual_add = None
if self.attn_nw is None:
output = self.fused_gemm_gelu(input=residual_norm,
weight=self.inter_w,
bias=self.inter_b,
weight_out=self.output_w)
else:
output, residual_add = self.mlp_gemm_func(input=input,
residual=residual,
weight_interm=self.inter_w,
weight_out=self.output_w,
input_bias=bias,
bias=self.inter_b,
gamma=self.attn_nw,
beta=self.attn_nb)
residual = self.residual_add_func(hidden_state=output,
residual=residual,
add_bias=bias is not None,
attention_output=input,
attention_bias=bias if bias is not None else self.output_b,
final_bias=self.output_b,
residual_add=residual_add)
if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(residual, group=self.mp_group)
return residual
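# Editor's sketch (not part of the original source): how the intermediate partition
# sizes used above are derived, for a hypothetical hidden_size=4096, mp_size=2 model
# with a gated activation (proj_factor == 2); plain integer arithmetic only.
def _example_mlp_partition_sizes(hidden_size=4096, mp_size=2, gated=True):
    intermediate_size = 4 * hidden_size  # default when the config does not set it
    proj_factor = 2 if gated else 1
    intm_w_sz_per_partition = intermediate_size * proj_factor // mp_size  # 16384: fused up + gate projection
    intm_o_sz_per_partition = intermediate_size // mp_size  # 8192: output projection input width
    return intm_w_sz_per_partition, intm_o_sz_per_partition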
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from .op_binding import LinearOp, VectorMatMulOp, SoftmaxContextOp, QKVGemmOp, SoftmaxOp
minus_inf = -10000.0
class DeepSpeedSelfAttention(nn.Module):
num_layers = 0
_qkv_buffers = []
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1):
super(DeepSpeedSelfAttention, self).__init__()
self.config = config
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
self.config.layer_id = DeepSpeedSelfAttention.num_layers
DeepSpeedSelfAttention.num_layers = DeepSpeedSelfAttention.num_layers + 1
device = get_accelerator().current_device_name() #if config.bigscience_bloom else 'cpu'
if self.config.set_empty_params:
self.attn_qw = None
self.attn_qb = None
self.attn_kw = None
self.attn_kb = None
self.attn_vw = None
self.attn_vb = None
self.attn_qkvw = None
self.attn_qkvb = None
self.attn_ow = None
self.attn_ob = None
else:
qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
qkv_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
out_size_per_partition = self.config.hidden_size // self.config.mp_size
self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
self.mp_group = mp_group
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups
self.merge_count = int(math.log2(merge_count))
self.norm_factor = math.sqrt(self.config.hidden_size // self.config.heads)
if not config.use_mup:
self.norm_factor = math.sqrt(self.norm_factor)
if self.config.scale_attn_by_inverse_layer_idx is True:
self.norm_factor *= math.sqrt(self.config.layer_id + 1)
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
self.qkv_func = QKVGemmOp(config)
self.score_context_func = SoftmaxContextOp(config)
self.linear_func = LinearOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
if len(DeepSpeedSelfAttention._qkv_buffers) == 0:
DeepSpeedSelfAttention._qkv_buffers = [
torch.empty(self.hidden_size_per_partition * 3,
self.config.hidden_size,
dtype=data_type_fp,
device=device),
torch.empty(self.hidden_size_per_partition * 3, dtype=data_type_fp, device=device)
]
def compute_attention(self, qkv_out, input_mask, layer_past, alibi):
if isinstance(qkv_out, list) or isinstance(qkv_out, tuple):
qkv_out = qkv_out[0]
no_masking = input_mask is None
if no_masking:
input_mask = torch.empty(1)
attn_key_value = self.score_context_func(
query_key_value=qkv_out,
attn_mask=((1 - input_mask).to(qkv_out.dtype) *
minus_inf) if input_mask.dtype == torch.int64 else input_mask,
heads=self.num_attention_heads_per_partition,
norm_factor=(1 / self.norm_factor if self.config.scale_attention else 1.0),
no_masking=no_masking,
layer_id=self.config.layer_id,
num_layers=DeepSpeedSelfAttention.num_layers,
alibi=alibi)
context_layer, key_layer, value_layer = attn_key_value
return context_layer, key_layer, value_layer
def _merge_qkv(self):
qvkw = DeepSpeedSelfAttention._qkv_buffers[0]
qvkw[:self.hidden_size_per_partition, :] = self.attn_qw # type: ignore
qvkw[self.hidden_size_per_partition:2 * self.hidden_size_per_partition, :] = self.attn_kw # type: ignore
qvkw[2 * self.hidden_size_per_partition:, :] = self.attn_vw # type: ignore
if self.attn_qb is not None:
qvkb = DeepSpeedSelfAttention._qkv_buffers[1]
qvkb[:self.hidden_size_per_partition] = self.attn_qb
qvkb[self.hidden_size_per_partition:2 * self.hidden_size_per_partition] = self.attn_kb # type: ignore
qvkb[2 * self.hidden_size_per_partition:] = self.attn_vb # type: ignore
return DeepSpeedSelfAttention._qkv_buffers
def forward(self,
input,
input_mask,
head_mask=None,
layer_past=None,
get_present=False,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
norm_w=None,
norm_b=None,
alibi=None):
if self.attn_qkvw is None:
self._attn_qkvw, self._attn_qkvb = self._merge_qkv()
else:
self._attn_qkvw = self.attn_qkvw
self._attn_qkvb = self.attn_qkvb
if not self.config.pre_layer_norm:
qkv_out = self.linear_func(input=input,
weight=self._attn_qkvw,
bias=self._attn_qkvb,
add_bias=self.attn_qkvb is not None,
do_flash_attn=False,
num_heads=self.num_attention_heads_per_partition,
num_layers=DeepSpeedSelfAttention.num_layers)
else:
qkv_out = self.qkv_func(input=input,
weight=self._attn_qkvw,
bias=self._attn_qkvb,
gamma=norm_w,
beta=norm_b)
context_layer, key_layer, value_layer = self.compute_attention(qkv_out=qkv_out,
input_mask=input_mask,
layer_past=layer_past,
alibi=alibi)
output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow)
inp_norm = qkv_out[-1]
if self.config.mlp_after_attn and self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(output, group=self.mp_group)
return (output, key_layer, value_layer, context_layer, inp_norm)
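# Editor's note/sketch (not part of the original source): the config docstring states
# that q and k are each scaled before the attention matmul, and the code above uses
# norm_factor == head_dim ** 0.25 when use_mup is False, which suggests an overall
# 1/sqrt(head_dim) softmax scaling. This is an editor's reading of the code, shown
# here with plain math only and hypothetical sizes.
def _example_norm_factor(hidden_size=1024, heads=16, use_mup=False):
    head_dim = hidden_size // heads
    norm_factor = math.sqrt(head_dim)
    if not use_mup:
        norm_factor = math.sqrt(norm_factor)  # head_dim ** 0.25
    per_tensor_scale = 1.0 / norm_factor
    # applied to q and k separately, the combined scale is 1/sqrt(head_dim)
    return per_tensor_scale * per_tensor_scale, 1.0 / math.sqrt(head_dim)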
class BloomSelfAttention(DeepSpeedSelfAttention):
def __init__(self, *args, **kwargs):
super(BloomSelfAttention, self).__init__(*args, **kwargs)
self.softmax_func = SoftmaxOp(self.config)
    ########### This part is taken/modified from the HF modeling_bloom.py ################
# Reference: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py
def _transpose_for_context(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_layer_shape = x.size()[:-2] + \
(self.hidden_size_per_partition,)
return x.view(*new_x_layer_shape).contiguous()
def _split_tensor_along_last_dim(self, tensor, num_partitions, contiguous_split_chunks=True):
"""Split a tensor along its last dimension.
Args:
tensor: ([`torch.tensor`], *required*):
input tensor to split
num_partitions ([`int`], *required*):
number of partitions to split the tensor
            contiguous_split_chunks ([`bool`], *optional*, default=`True`):
If True, make each chunk contiguous in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
numerator, denominator = tensor.size()[last_dim], num_partitions
if not (numerator % denominator == 0):
raise ValueError(f"{numerator} is not divisible by {denominator}")
last_dim_size = numerator // denominator
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
def compute_attention(self, qkv_out, input_mask, layer_past, alibi):
if isinstance(qkv_out, list) or isinstance(qkv_out, tuple):
qkv_out = qkv_out[0]
no_masking = input_mask is None
if no_masking:
input_mask = torch.empty(1)
mixed_x_layer = qkv_out
alibi = alibi.to(get_accelerator().current_device_name())
head_dim = self.hidden_size_per_partition // self.num_attention_heads_per_partition
new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads_per_partition, 3 * head_dim)
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
query_layer, key_layer, value_layer = self._split_tensor_along_last_dim(mixed_x_layer, 3)
# [batch_size, head_dim, q_length, k_length]
output_size = (query_layer.size(0), query_layer.size(2), query_layer.size(1), key_layer.size(1))
# [batch_size, q_length, num_heads, head_dim] -> [q_length, batch_size * num_heads, head_dim]
query_layer = query_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[2], -1)
# [batch_size, k_length, num_heads, head_dim] -> [k_length, batch_size * num_heads, head_dim]
key_layer = key_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3],
-1).transpose(-1, -2)
value_layer = value_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3], -1)
if layer_past is not None:
past_key, past_value = layer_past
# concatenate along seq_length dimension -> [batch_size, qk_length, num_heads, head_dim]
key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=-1)
value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=-2)
presents = (key_layer, value_layer)
# Raw attention scores. [batch_size * num_heads, q_length, k_length]
matmul_result = torch.matmul(query_layer, key_layer)
# change view to [batch_size, num_heads, q_length, k_length]
attention_scores = matmul_result.view(output_size[0], output_size[1], output_size[2], -1)
offset = dist.get_rank() * self.num_attention_heads_per_partition if dist.is_initialized() else 0
target_dtype = torch.float16 if self.config.dtype == torch.int8 else self.config.dtype
attention_probs = self.softmax_func(attn_scores=attention_scores,
attn_mask=((1 - input_mask).to(target_dtype) * minus_inf),
alibi=alibi,
triangular=(self.config.triangular_masking
and (attention_scores.shape[-2] > 1)),
recompute=False,
local_attention=False,
window_size=1,
async_op=False,
layer_scale=1 / (self.norm_factor * self.norm_factor),
head_offset=offset)
# change view [batch_size x num_heads, q_length, k_length]
attention_probs_reshaped = attention_probs.view(*matmul_result.shape)
# matmul: [batch_size * num_heads, q_length, head_dim]
context_layer = torch.bmm(attention_probs_reshaped, value_layer)
# change view [batch_size, num_heads, q_length, head_dim]
context_layer = context_layer.view(
context_layer.size(0) // self.num_attention_heads_per_partition, self.num_attention_heads_per_partition,
context_layer.size(1), context_layer.shape[-1])
context_layer = self._transpose_for_context(context_layer)
key_layer = presents[0]
value_layer = presents[1]
return context_layer, key_layer, value_layer
###################### End of HF modeling_bloom addition ########################
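# Editor's sketch (not part of the original source): shape behaviour of the qkv split
# used in compute_attention above, on a hypothetical fused tensor of shape
# [batch, seq, heads, 3 * head_dim]; uses plain torch.split only.
def _example_split_qkv(batch=2, seq=5, heads=4, head_dim=8):
    fused = torch.randn(batch, seq, heads, 3 * head_dim)
    q, k, v = torch.split(fused, head_dim, dim=-1)  # same splitting rule as _split_tensor_along_last_dim
    return q.shape, k.shape, v.shape  # each is [batch, seq, heads, head_dim]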
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .config import DeepSpeedInferenceConfig
from ....model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
from .moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Optional
import torch
from deepspeed.ops.op_builder import SpatialInferenceBuilder
spatial_cuda_module = None
def nhwc_bias_add(activation: torch.Tensor,
bias: torch.Tensor,
other: Optional[torch.Tensor] = None,
other_bias: Optional[torch.Tensor] = None) -> torch.Tensor:
global spatial_cuda_module
if spatial_cuda_module is None:
spatial_cuda_module = SpatialInferenceBuilder().load()
if other is None:
return spatial_cuda_module.nhwc_bias_add(activation, bias)
elif other_bias is None:
return spatial_cuda_module.nhwc_bias_add_add(activation, bias, other)
else:
return spatial_cuda_module.nhwc_bias_add_bias_add(activation, bias, other, other_bias)
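# Editor's sketch (not part of the original source) of what the fused kernels above
# compute, written in plain PyTorch under the assumption that the biases broadcast
# over the channel (last) dimension of NHWC activations. This is an illustration of
# the math, not a drop-in replacement for the kernels.
def _reference_nhwc_bias_add(activation: torch.Tensor,
                             bias: torch.Tensor,
                             other: Optional[torch.Tensor] = None,
                             other_bias: Optional[torch.Tensor] = None) -> torch.Tensor:
    out = activation + bias
    if other is not None:
        out = out + (other + other_bias if other_bias is not None else other)
    return out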
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Inspired by original Triton implementation:
https://github.com/openai/triton/blob/b244db06da24a87453a40ad35b085ee37dac3705/python/tutorials/06-fused-attention.py
"""
import torch
import triton
import triton.language as tl
@triton.jit
def _fwd_kernel(
Q,
K,
V,
sm_scale,
TMP,
Out,
stride_qz,
stride_qh,
stride_qm,
stride_qk,
stride_kz,
stride_kh,
stride_kn,
stride_kk,
stride_vz,
stride_vh,
stride_vk,
stride_vn,
stride_oz,
stride_oh,
stride_om,
stride_on,
Z,
H,
N_CTX,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
off_k = off_hz * stride_kh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk
off_v = off_hz * stride_vh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk
# Initialize pointers to Q, K, V
q_ptrs = Q + off_q
k_ptrs = K + off_k
v_ptrs = V + off_v
# initialize pointer to m and l
t_ptrs = TMP + off_hz * N_CTX + offs_m
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# load q: it will stay in SRAM throughout
q = tl.load(q_ptrs)
# loop over k, v and update accumulator
for start_n in range(0, N_CTX, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(k_ptrs + start_n * stride_kn)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
qk *= sm_scale
# -- compute m_ij, p, l_ij
m_ij = tl.max(qk, 1)
p = tl.exp(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.exp(m_i - m_i_new)
beta = tl.exp(m_ij - m_i_new)
l_i_new = alpha * l_i + beta * l_ij
# -- update output accumulator --
# scale p
p_scale = beta / l_i_new
p = p * p_scale[:, None]
# scale acc
acc_scale = l_i / l_i_new * alpha
tl.store(t_ptrs, acc_scale)
acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load
acc = acc * acc_scale[:, None]
# update acc
v = tl.load(v_ptrs + start_n * stride_vk)
p = p.to(tl.float16)
acc += tl.dot(p, v)
# update m_i and l_i
l_i = l_i_new
m_i = m_i_new
# initialize pointers to output
offs_n = tl.arange(0, BLOCK_DMODEL)
off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on
out_ptrs = Out + off_o
tl.store(out_ptrs, acc)
class triton_flash_attn(torch.nn.Module):
def __init__(self, ):
super(triton_flash_attn, self).__init__()
def forward(self, q, k, v, sm_scale, block_128=True):
BLOCK = 128 if block_128 else 64
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
o = torch.empty_like(q)
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1])
tmp = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel[grid](
q,
k,
v,
sm_scale,
tmp,
o,
q.stride(0),
q.stride(1),
q.stride(2),
q.stride(3),
k.stride(0),
k.stride(1),
k.stride(2),
k.stride(3),
v.stride(0),
v.stride(1),
v.stride(2),
v.stride(3),
o.stride(0),
o.stride(1),
o.stride(2),
o.stride(3),
k.shape[0],
k.shape[1],
k.shape[2],
BLOCK_M=BLOCK,
BLOCK_N=BLOCK,
BLOCK_DMODEL=Lk,
num_warps=num_warps,
num_stages=1,
)
return o
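# Editor's usage sketch (not part of the original source): the kernel expects q/k/v as
# [batch, heads, seq_len, head_dim] half-precision tensors on a CUDA device with triton
# available; the sizes below are hypothetical.
def _example_triton_flash_attn():
    attn = triton_flash_attn()
    batch, heads, seq, head_dim = 2, 8, 256, 64
    q = torch.randn(batch, heads, seq, head_dim, dtype=torch.float16, device='cuda')
    k = torch.randn_like(q)
    v = torch.randn_like(q)
    sm_scale = head_dim ** -0.5
    return attn(q, k, v, sm_scale, block_128=True)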
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import torch.nn as nn
from deepspeed import module_inject
from .diffusers_attention import DeepSpeedDiffusersAttention
from .bias_add import nhwc_bias_add
from .diffusers_2d_transformer import Diffusers2DTransformerConfig
from deepspeed.ops.op_builder import InferenceBuilder, SpatialInferenceBuilder
from deepspeed.utils.types import ActivationFuncType
# Ops will be loaded on demand
transformer_cuda_module = None
spatial_cuda_module = None
def load_transformer_module():
global transformer_cuda_module
if transformer_cuda_module is None:
transformer_cuda_module = InferenceBuilder().load()
return transformer_cuda_module
def load_spatial_module():
global spatial_cuda_module
if spatial_cuda_module is None:
spatial_cuda_module = SpatialInferenceBuilder().load()
return spatial_cuda_module
class DeepSpeedDiffusersTransformerBlock(nn.Module):
def __init__(self, equivalent_module: nn.Module, config: Diffusers2DTransformerConfig):
super(DeepSpeedDiffusersTransformerBlock, self).__init__()
self.quantizer = module_inject.GroupQuantizer(q_int8=config.int8_quantization)
# Ensure ops are built by the time we start running
self.config = config
self.ff1_w = self.quantizer.quantize(
nn.Parameter(equivalent_module.ff.net[0].proj.weight.data, requires_grad=False))
self.ff1_b = nn.Parameter(equivalent_module.ff.net[0].proj.bias.data, requires_grad=False)
self.ff2_w = self.quantizer.quantize(nn.Parameter(equivalent_module.ff.net[2].weight.data,
requires_grad=False))
self.ff2_b = nn.Parameter(equivalent_module.ff.net[2].bias.data, requires_grad=False)
self.norm1_g = nn.Parameter(equivalent_module.norm1.weight.data, requires_grad=False)
self.norm1_b = nn.Parameter(equivalent_module.norm1.bias.data, requires_grad=False)
self.norm1_eps = equivalent_module.norm1.eps
self.norm2_g = nn.Parameter(equivalent_module.norm2.weight.data, requires_grad=False)
self.norm2_b = nn.Parameter(equivalent_module.norm2.bias.data, requires_grad=False)
self.norm2_eps = equivalent_module.norm2.eps
self.norm3_g = nn.Parameter(equivalent_module.norm3.weight.data, requires_grad=False)
self.norm3_b = nn.Parameter(equivalent_module.norm3.bias.data, requires_grad=False)
self.norm3_eps = equivalent_module.norm3.eps
self.attn_1 = equivalent_module.attn1
self.attn_2 = equivalent_module.attn2
# Pull the bias in if we can
if isinstance(self.attn_1, DeepSpeedDiffusersAttention):
self.attn_1.do_out_bias = False
self.attn_1_bias = self.attn_1.attn_ob
else:
self.attn_1_bias = nn.Parameter(torch.zeros_like(self.norm2_g), requires_grad=False)
# Pull the bias in if we can
if isinstance(self.attn_2, DeepSpeedDiffusersAttention):
self.attn_2.do_out_bias = False
self.attn_2_bias = self.attn_2.attn_ob
else:
            self.attn_2_bias = nn.Parameter(torch.zeros_like(self.norm3_g), requires_grad=False)
self.transformer_cuda_module = load_transformer_module()
load_spatial_module()
def forward(self, hidden_states, context=None, timestep=None, **kwargs):
        # In v0.12.0 of diffusers, several new kwargs were added; capturing
        # them with **kwargs maintains backward compatibility.
        # In v0.11.0 of diffusers, the kwarg was renamed from 'context' to 'encoder_hidden_states',
        # so both are checked here to support older and newer versions of diffusers.
        if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] is not None:
context = kwargs["encoder_hidden_states"]
out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps)
out_attn_1 = self.attn_1(out_norm_1)
out_norm_2, out_attn_1 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(
out_attn_1, self.attn_1_bias, hidden_states, self.norm2_g, self.norm2_b, self.norm2_eps)
out_attn_2 = self.attn_2(out_norm_2, context=context)
out_norm_3, out_attn_2 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(
out_attn_2, self.attn_2_bias, out_attn_1, self.norm3_g, self.norm3_b, self.norm3_eps)
out_ff1 = nn.functional.linear(out_norm_3, self.ff1_w)
out_geglu = self.transformer_cuda_module.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU)
out_ff2 = nn.functional.linear(out_geglu, self.ff2_w)
return nhwc_bias_add(out_ff2, self.ff2_b, other=out_attn_2)
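# Editor's sketch (not part of the original source) of the feed-forward path in the
# forward() above, written in plain PyTorch and assuming the fused gated_activation
# follows the usual diffusers GEGLU convention (project to 2x width, then
# value * gelu(gate)). It illustrates the data flow only and is not a drop-in
# replacement for the fused kernels.
def _reference_geglu_ff(x, ff1_w, ff1_b, ff2_w, ff2_b):
    proj = nn.functional.linear(x, ff1_w, ff1_b)  # [..., 2 * d_ff]
    value, gate = proj.chunk(2, dim=-1)
    hidden = value * nn.functional.gelu(gate)
    return nn.functional.linear(hidden, ff2_w, ff2_b)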
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
import math
import torch
from torch.autograd import Function
# accelerator modules will be imported if needed
inference_module = None
specialized_mode = None
import torch.nn as nn
from .ds_attention import DeepSpeedSelfAttention
from .config import DeepSpeedInferenceConfig
from ....moe.sharded_moe import TopKGate
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig):
"""Initialize the DeepSpeed Transformer Config.
Arguments:
hidden_size: The hidden size of the transformer layer
intermediate_size: The intermediate size of the feed-forward part of transformer layer
heads: The number of heads in the self-attention of the transformer layer
num_hidden_layers: The number of transformer layers
layer_norm_eps: The epsilon value for the layer norm
        local_rank: Optional: The rank of the GPU running the transformer kernel. It is not required
            if the model has already set the current device; otherwise it needs to be set so that
            the transformer kernel can work on the right device.
mp_size (optional): This argument is mainly used to create the parameters on the kernel side
using model-parallel architecture. If the client model already takes care of this, there is no
need to pass this argument.
fp16: Enable half-precision computation
bf16: Enable bf16 floating point computation
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
        stochastic_mode: Enable for higher performance. Note that this flag has some level of
            non-determinism and can produce different results on different runs. However, we have seen
            that enabling it does not affect pretraining tasks such as BERT, which can still reach
            a high accuracy level. For downstream tasks such as fine-tuning, we recommend
            turning it off in order to reproduce the same result through the regular kernel execution.
scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation.
return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor
"""
def __init__(self,
hidden_size=-1,
intermediate_size=-1,
heads=-1,
num_hidden_layers=-1,
layer_norm_eps=1e-12,
local_rank=-1,
mp_size=1,
fp16=False,
bf16=False,
q_int8=False,
pre_layer_norm=True,
stochastic_mode=False,
scale_attention=True,
triangular_masking=True,
local_attention=False,
window_size=256,
return_tuple=True,
moe_experts=1,
global_experts=1,
k=1,
capacity_factor=1.,
eval_capacity_factor=1.,
min_capacity=1,
noisy_gate_policy=None,
drop_tokens=True,
use_rts=False,
mlp_type='standard',
scale_attn_by_inverse_layer_idx=False):
        super(DeepSpeedMoEInferenceConfig,
              self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
                             num_hidden_layers, layer_norm_eps, local_rank, mp_size,
                             dtype=(torch.int8 if q_int8 else torch.half if fp16 else torch.bfloat16 if bf16 else torch.float),
                             pre_layer_norm=pre_layer_norm, stochastic_mode=stochastic_mode, scale_attention=scale_attention,
                             triangular_masking=triangular_masking, local_attention=local_attention, window_size=window_size,
                             return_tuple=return_tuple)
        # keep the legacy precision flags since the MoE MLP path below still reads config.fp16/config.q_int8
        self.fp16, self.bf16, self.q_int8 = fp16, bf16, q_int8
self.moe_experts = moe_experts
self.k = k
self.capacity_factor = capacity_factor
self.eval_capacity_factor = eval_capacity_factor
self.min_capacity = min_capacity
self.noisy_gate_policy = noisy_gate_policy
self.drop_tokens = drop_tokens
self.use_rts = use_rts
self.global_experts = global_experts
self.mlp_type = mlp_type
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
@classmethod
def from_dict(cls, json_object):
config = DeepSpeedInferenceConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
class DeepSpeedMLPFunction(Function):
@staticmethod
def forward(ctx, input, inter_w, inter_b, config, output_b, output_w, q_scales, q_groups, merge_count, mp_group,
async_op):
if config.q_int8:
intermediate = inference_module.fused_gemm_gelu_int8(input, inter_w, inter_b, config.epsilon, q_scales[2],
(q_groups * (2**merge_count)), config.pre_layer_norm)
output = inference_module.vector_matmul_int8(intermediate, output_w, q_scales[3], q_groups, (merge_count))
else:
mlp_gemm_func = inference_module.fused_gemm_gelu_fp16 if config.fp16 else \
inference_module.fused_gemm_gelu_fp32
output = mlp_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op)
if mp_group is not None and dist.get_world_size(group=mp_group) > 1:
dist.all_reduce(output, group=mp_group, async_op=async_op)
return output + output_b
@staticmethod
def backward(ctx, grad_output):
raise RuntimeError('You are running with DeepSpeed Inference mode. \
Please switch to Training mode for running backward!')
class DeepSpeedMoEMLP(nn.Module):
def __init__(self, config, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False, mp_group=None):
super(DeepSpeedMoEMLP, self).__init__()
self.config = config
self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
interm_size = self.config.intermediate_size // (1 if mp_group is None else dist.get_world_size(group=mp_group))
self.inter_w = nn.Parameter(torch.Tensor(self.config.hidden_size, interm_size))
self.inter_b = nn.Parameter(torch.Tensor(interm_size))
self.output_w = nn.Parameter(torch.Tensor((interm_size), self.config.hidden_size))
self.output_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
self.merge_count = int(math.log2(merge_count))
self.mp_group = mp_group
def forward(self, input, async_op=False):
return DeepSpeedMLPFunction.apply(input, self.inter_w, self.inter_b, self.config, self.output_b, self.output_w,
self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op)
class DeepSpeedMoEInference(nn.Module):
"""Initialize the DeepSpeed MoE Transformer Layer.
Arguments:
        layer_id: The layer index, starting from 0; e.g. if the model has 24 transformer layers,
            layer_id will be 0, 1, 2, ..., 23 as each layer object is instantiated.
config: An object of DeepSpeedInferenceConfig
mp_group: Model parallelism group initialized on the modeling side.
quantize_scales: This argument groups all the layers' scales used for quantization
quantize_groups: Number of groups used for quantizing the model
merge_count: Shows the number of model-parallel checkpoints merged before running inference.
We use this argument to control the quantization scale for the model parameters if a bigger
quantize-grouping than 1 is used.
mlp_extra_grouping: This flag is used to show a 2x higher number of groups used for the MLP part
of a Transformer layer. We use this feature for quantization to reduce the convergence impact
for specific downstream tasks.
"""
layer_id = 0
def __init__(self,
config,
mp_group=None,
ep_group=None,
expert_mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super(DeepSpeedMoEInference, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedMoEInference.layer_id
global inference_module
global specialized_mode
if inference_module is None:
specialized_mode = False
            # InferenceSpecializedBuilder is not among the DeepSpeed-provided builders yet, so we look it up by name
            builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder")
            if builder is not None and builder.is_compatible():
inference_module = builder.load()
specialized_mode = True
else:
inference_module = InferenceBuilder().load()
self.config.specialized_mode = specialized_mode
assert self.config.dtype != torch.bfloat16, "DeepSpeed MoE Transformer Inference not yet tested for bfloat support"
DeepSpeedMoEInference.layer_id += 1
self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count)
self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_w = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
if config.mlp_type == 'residual':
self.res_mlp = DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping,
mp_group)
self.res_coef = nn.Parameter(torch.Tensor(self.config.hidden_size, 2))
self.coef_func = inference_module.softmax_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.softmax_fp32
self.vector_matmul_func = inference_module.vector_matmul_fp16 if self.config.dtype == torch.float16 else \
inference_module.vector_matmul_fp32
config.mp_size = 1
self.mlp = nn.ModuleList(
DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, expert_mp_group)
for i in range(self.config.moe_experts))
self.moe_gate = TopKGate(self.config.hidden_size, self.config.global_experts, self.config.k,
self.config.capacity_factor, self.config.eval_capacity_factor,
self.config.min_capacity, self.config.noisy_gate_policy, self.config.drop_tokens,
self.config.use_rts)
self.ep_group = ep_group
self.mp_group = mp_group
self.expert_mp_group = expert_mp_group
print("DeepSpeed MoE Transformer Inference config is ", self.config.__dict__)
self.bias_residual_func = inference_module.bias_residual_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.bias_residual_fp32
self.ds_layernorm = inference_module.layer_norm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.layer_norm_fp32
self.einsum_sec_sm_ecm = inference_module.einsum_sec_sm_ecm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.einsum_sec_sm_ecm_fp32
def res_coef_func(self, inp, async_op):
inp = self.vector_matmul_func(inp, self.res_coef, async_op)
return self.coef_func(inp, torch.empty(1), False, False, False, 256, async_op)
def moe_gate_einsum(self, attention_output):
_, combined_weights, dispatch_mask, _ = self.moe_gate(
attention_output.view(-1, self.config.hidden_size),
None,
)
dispatched_attention = self.einsum_sec_sm_ecm(dispatch_mask.type_as(attention_output),
attention_output.view(-1, self.config.hidden_size))
return dispatched_attention, combined_weights
def expert_exec(self, dispatched_input):
dispatched_input = dispatched_input.reshape(self.config.global_experts // self.config.moe_experts,
self.config.moe_experts, -1, self.config.hidden_size)
chunks = dispatched_input.chunk(self.config.moe_experts, dim=1)
expert_outputs = torch.empty((
self.config.moe_experts,
chunks[0].shape[0],
) + chunks[0].shape[2:],
dtype=dispatched_input.dtype,
device=dispatched_input.device)
for chunk, expert in zip(chunks, range(len(self.mlp))):
expert_outputs[expert] = self.mlp[expert](chunk.view(-1, dispatched_input.shape[-2],
dispatched_input.shape[-1]))
return expert_outputs
def _alltoall(self, dispatched_attention):
if dist.get_world_size(group=self.ep_group) > 1:
dispatched_input = torch.empty_like(dispatched_attention)
dist.all_to_all_single(dispatched_input, dispatched_attention, group=self.ep_group)
return dispatched_input
else:
return dispatched_attention
def scale_expert_output(self, attention_output, expert_output, combined_weights):
combined_output = torch.matmul(
combined_weights.type_as(attention_output).reshape(combined_weights.shape[0], -1),
expert_output.reshape(-1, expert_output.shape[-1]))
return combined_output.reshape(attention_output.shape)
def forward(self,
input,
input_mask=None,
attention_mask=None,
head_mask=None,
layer_past=None,
get_key_value=False,
get_present=False,
encoder_output=None,
enc_dec_attn_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False):
get_present = (get_present or get_key_value or use_cache)
input_mask = input_mask if attention_mask is None else attention_mask
input_type = input.dtype
if (self.config.dtype in [torch.float16, torch.int8]) and input_type == torch.float:
input = input.half()
with torch.no_grad():
attention_output = self.attention(input, input_mask, head_mask, layer_past, get_present,
encoder_hidden_states, encoder_attention_mask, output_attentions,
self.norm_w, self.norm_b)
if get_present:
attention_output, p_key, p_value = attention_output[0:3]
presents = (p_key, p_value)
elif output_attentions:
attention_output, _, _, context_output = attention_output[0:4]
else:
attention_output = attention_output[0]
residual_add = attention_output + self.attention.attn_ob
attention_output = self.ds_layernorm(residual_add, self.attn_nw, self.attn_nb, self.config.epsilon)
if self.config.mlp_type == 'residual':
res_mlp_out = self.res_mlp(attention_output, async_op=True)
res_coef_out = self.res_coef_func(attention_output, async_op=True)
if self.expert_mp_group is not None:
tensor_list = [
torch.empty_like(attention_output) for _ in range(dist.get_world_size(group=self.expert_mp_group))
]
tensor_list[dist.get_rank(group=self.expert_mp_group)] = attention_output
dist.all_gather(tensor_list, attention_output, group=self.expert_mp_group)
attention_output = torch.cat(tensor_list).contiguous()
############## MoE Gating + Experts ###############
dispatched_attention, combined_weights = self.moe_gate_einsum(attention_output)
dispatched_input = self._alltoall(dispatched_attention)
expert_outputs = self.expert_exec(dispatched_input)
expert_output = self._alltoall(expert_outputs)
output = self.scale_expert_output(attention_output, expert_output, combined_weights)
################################################
if self.expert_mp_group is not None:
output = output.split(output.shape[0] // dist.get_world_size(group=self.expert_mp_group),
dim=0)[dist.get_rank(group=self.expert_mp_group)]
if self.config.mlp_type == 'residual':
inference_module.moe_res_matmul(res_mlp_out, res_coef_out, output)
output = self.bias_residual_func(output, residual_add, torch.empty(1))
if not self.config.pre_layer_norm:
output = self.ds_layernorm(output, self.norm_w, self.norm_b, self.config.epsilon)
if input_type != output.dtype:
output = output.to(input_type)
if get_present:
output = (output, presents)
if self.config.return_tuple:
return output if type(output) is tuple else (output, )
else:
return output
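# Editor's sketch (not part of the original source): the dispatch/combine used above is
# the standard top-k MoE einsum pair: dispatch is 'sec,sm->ecm' (what einsum_sec_sm_ecm
# computes) and combine is 'sec,ecm->sm', and scale_expert_output is the combine written
# with reshapes and a matmul. Dimension names: s = tokens, e = experts, c = capacity,
# m = model dim; the values below are hypothetical.
def _example_moe_dispatch_combine(s=8, e=2, c=4, m=16):
    dispatch_mask = torch.zeros(s, e, c)
    combined_weights = torch.rand(s, e, c)
    tokens = torch.randn(s, m)
    dispatched = torch.einsum("sec,sm->ecm", dispatch_mask, tokens)
    expert_out = torch.randn(e, c, m)  # stand-in for the expert MLP outputs
    combined = torch.einsum("sec,ecm->sm", combined_weights, expert_out)
    combined_mm = torch.matmul(combined_weights.reshape(s, -1), expert_out.reshape(-1, m))
    return torch.allclose(combined, combined_mm), dispatched.shape  # True, (e, c, m)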
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed import comm as dist
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class SoftmaxContextOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(SoftmaxContextOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.softmax_context_func = self.inference_module.softmax_context_fp16
elif self.config.dtype == torch.bfloat16:
self.softmax_context_func = self.inference_module.softmax_context_bf16
else:
self.softmax_context_func = self.inference_module.softmax_context_fp32
except AttributeError:
self.softmax_context_func = self.softmax_context_fallback
    def softmax_context_fallback(self, query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads,
norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
num_layers, alibi):
raise NotImplementedError
def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: int, norm_factor: float,
no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor):
if alibi is not None:
batch_heads = query_key_value.shape[0] * heads
offset = dist.get_rank() * batch_heads if dist.is_initialized() else 0
alibi = alibi[offset:batch_heads + offset, :, :]
else:
alibi = torch.empty(1)
output = self.softmax_context_func(query_key_value, attn_mask, self.config.rotary_dim, self.config.rotate_half,
self.config.rotate_every_two, heads, norm_factor,
self.config.triangular_masking, self.config.local_attention,
self.config.window_size, no_masking, layer_id, num_layers, alibi)
return output
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class VectorMatMulOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(VectorMatMulOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.vector_matmul_func = self.inference_module.vector_matmul_fp16
elif self.config.dtype == torch.bfloat16:
self.vector_matmul_func = self.inference_module.vector_matmul_bf16
else:
self.vector_matmul_func = self.inference_module.vector_matmul_fp32
except AttributeError:
self.vector_matmul_func = self.vector_matmul_fallback
def vector_matmul_fallback(self, input, weight, async_op, q_scale, q_int8, transpose):
if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose:
return torch.matmul(input, weight)
else:
raise NotImplementedError
def forward(self, input: torch.Tensor, weight: torch.Tensor, async_op: bool = False):
q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1)
q_int8 = self.config.dtype == torch.int8
output = self.vector_matmul_func(input, weight, async_op, q_scale, q_int8, self.config.transposed_mode)
return output
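# Editor's sketch (not part of the original source): with DS_KI_FALLBACK=True and
# transposed_mode disabled, vector_matmul_fallback above reduces to a plain matmul of
# the hidden states with a weight stored as [in_features, out_features]; the shapes
# below are hypothetical.
def _example_vector_matmul_math(batch=2, seq=8, hidden=64):
    x = torch.randn(batch, seq, hidden)
    w = torch.randn(hidden, hidden)  # already laid out so that no transpose is needed
    return torch.matmul(x, w)  # what the fallback computes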
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class LinearOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(LinearOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.linear_func = self.inference_module.linear_layer_fp16
elif self.config.dtype == torch.bfloat16:
self.linear_func = self.inference_module.linear_layer_bf16
else:
self.linear_func = self.inference_module.linear_layer_fp32
except AttributeError:
self.linear_func = self.linear_fallback
def linear_fallback(self, input, weight, bias, add_bias, do_flash_attn, num_heads, transpose):
raise NotImplementedError
def forward(self,
input: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
add_bias: bool,
do_flash_attn: bool,
num_heads: int,
external_cache: bool = None,
num_layers: int = None):
qkv_out = self.linear_func(input, weight, bias, add_bias, do_flash_attn, num_heads,
self.config.transposed_mode)
return qkv_out
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .linear import LinearOp
from .vector_matmul import VectorMatMulOp
from .softmax_context import SoftmaxContextOp
from .qkv_gemm import QKVGemmOp
from .softmax import SoftmaxOp
from .mlp_gemm import MLPGemmOp
from .gelu_gemm import GELUGemmOp
from .residual_add import ResidualAddOp
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class GELUGemmOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(GELUGemmOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_bf16
else:
self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_fp32 # type: ignore
except AttributeError:
self.fused_gemm_gelu = self.gelu_gemm_fallback
def gelu_gemm_fallback(self, input, weight, scale, bias, out, out_scale, dtype, transpose):
raise NotImplementedError
def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, weight_out: torch.Tensor):
output = self.fused_gemm_gelu(
input,
weight,
weight.scale if hasattr(weight, 'scale') else torch.empty(1), # type: ignore
bias,
weight_out,
weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore
self.config.dtype == torch.int8,
self.config.transposed_mode)
return output
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import torch.nn.functional as F
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
from deepspeed.utils.types import NormType
class QKVGemmOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(QKVGemmOp, self).__init__(config)
try:
if self.config.norm_type == NormType.LayerNorm:
if self.config.dtype in [torch.float16, torch.int8]:
self.qkv_gemm_func = self.inference_module.qkv_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.qkv_gemm_func = self.inference_module.qkv_gemm_bf16
else:
self.qkv_gemm_func = self.inference_module.qkv_gemm_fp32 # type: ignore
elif self.config.norm_type == NormType.RMSNorm:
if self.config.dtype in [torch.float16, torch.int8]:
self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_bf16
else:
self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_fp32 # type: ignore
except AttributeError:
if self.config.norm_type == NormType.LayerNorm:
self.qkv_gemm_func = self.qkv_gemm_fallback
elif self.config.norm_type == NormType.RMSNorm:
self.qkv_gemm_func = self.rms_qkv_gemm_fallback
def qkv_gemm_fallback(self, input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose):
if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose:
inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps)
tmp = torch.matmul(inp_norm, weight)
if add_bias:
tmp += bias
output = [tmp, inp_norm]
return output
else:
raise NotImplementedError
def rms_qkv_gemm_fallback(self, input, weight, q_scale, gamma, eps, q_int8, transpose):
raise NotImplementedError
def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, gamma: torch.Tensor,
beta: torch.Tensor):
add_bias = bias is not None
bias = bias if add_bias else torch.empty(1) # type: ignore
q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1) # type: ignore
q_int8 = self.config.dtype == torch.int8
if self.config.norm_type == NormType.LayerNorm:
output, norm = self.qkv_gemm_func(input, weight, q_scale, bias, gamma, beta, self.config.epsilon, add_bias,
q_int8, self.config.transposed_mode)
else:
output, norm = self.qkv_gemm_func(input, weight, q_scale, gamma, self.config.epsilon, q_int8,
self.config.transposed_mode)
return output, norm
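# Editor's sketch (not part of the original source) mirroring qkv_gemm_fallback above
# in plain PyTorch: LayerNorm over the hidden dimension followed by a fused q/k/v
# projection; the sizes and eps below are hypothetical.
def _example_qkv_gemm_math(batch=2, seq=8, hidden=64, eps=1e-5):
    x = torch.randn(batch, seq, hidden)
    gamma, beta = torch.ones(hidden), torch.zeros(hidden)
    w = torch.randn(hidden, 3 * hidden)  # fused q/k/v projection weight
    b = torch.zeros(3 * hidden)
    inp_norm = F.layer_norm(x, (hidden, ), gamma, beta, eps)
    qkv = torch.matmul(inp_norm, w) + b
    return qkv, inp_norm  # matches the [output, norm] pair returned above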
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Optional
import os
import torch
import torch.nn.functional as F
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
from deepspeed.utils.types import NormType
class MLPGemmOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(MLPGemmOp, self).__init__(config)
try:
if self.config.norm_type == NormType.LayerNorm:
if self.config.dtype in [torch.float16, torch.int8]:
self.mlp_gemm_func = self.inference_module.mlp_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.mlp_gemm_func = self.inference_module.mlp_gemm_bf16
else:
self.mlp_gemm_func = self.inference_module.mlp_gemm_fp32 # type: ignore
elif self.config.norm_type == NormType.RMSNorm:
if self.config.dtype in [torch.float16, torch.int8]:
self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_bf16
else:
self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_fp32 # type: ignore
except AttributeError:
if self.config.norm_type == NormType.LayerNorm:
self.mlp_gemm_func = self.mlp_gemm_fallback
elif self.config.norm_type == NormType.RMSNorm:
self.mlp_gemm_func = self.rms_mlp_gemm_fallback
def mlp_gemm_fallback(self, input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps,
pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type,
transpose):
if os.environ.get('DS_KI_FALLBACK') == 'True' and mlp_after_attn and not transpose:
residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta,
self.config.epsilon)
tmp = torch.matmul(residual_add, weight_interm)
tmp = F.gelu(tmp + bias)
output = torch.matmul(tmp, weight_out)
return (output, residual_add)
else:
raise NotImplementedError
def rms_mlp_gemm_fallback(self, input, residual, weight_interm, weight_out, gamma, eps, interm_scale, out_scale,
dtype, mlp_act_func_type, transpose):
raise NotImplementedError
def forward(self,
input: torch.Tensor,
residual: torch.Tensor,
weight_interm: torch.Tensor,
weight_out: torch.Tensor,
input_bias: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
gamma: Optional[torch.Tensor] = None,
beta: Optional[torch.Tensor] = None):
if self.config.norm_type == NormType.LayerNorm:
output, residual_add = self.mlp_gemm_func(
input,
residual,
input_bias,
weight_interm,
weight_out,
bias,
gamma,
beta,
self.config.epsilon,
self.config.pre_layer_norm,
self.config.mlp_after_attn,
weight_interm.scale if hasattr(weight_interm, 'scale') else torch.empty(1), # type: ignore
weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore
self.config.dtype == torch.int8,
self.config.mlp_act_func_type,
self.config.transposed_mode)
else:
output, residual_add = self.mlp_gemm_func(
input,
residual,
weight_interm,
weight_out,
gamma,
self.config.epsilon,
weight_interm.scale if hasattr(weight_interm, 'scale') else torch.empty(1), # type: ignore
weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore
self.config.dtype == torch.int8,
self.config.mlp_act_func_type,
self.config.transposed_mode)
return output, residual_add
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import torch.nn.functional as F
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class SoftmaxOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(SoftmaxOp, self).__init__(config)
self.num_attention_heads_per_partition = config.heads // config.mp_size
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.softmax_func = self.inference_module.softmax_fp16
elif self.config.dtype == torch.bfloat16:
self.softmax_func = self.inference_module.softmax_bf16
else:
self.softmax_func = self.inference_module.softmax_fp32
except AttributeError:
self.softmax_func = self.softmax_fallback
def softmax_fallback(self, attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size,
async_op, layer_scale, head_offset, mp_size):
if os.environ.get('DS_KI_FALLBACK') == 'True':
alibi = alibi[head_offset:head_offset + self.num_attention_heads_per_partition]
input_dtype = attn_scores.dtype
if (triangular):
tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool)
attn_scores = torch.masked_fill(attn_scores * layer_scale, tri, torch.finfo(input_dtype).min)
if alibi is not None:
attn_scores += alibi
if attn_mask is not None:
                # expand attn_mask from 2D to 4D by inserting two singleton dims in the middle
attn_mask = attn_mask[:, None, None, :]
attn_scores += attn_mask
output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype)
return output
else:
raise NotImplementedError
def forward(self, attn_scores: torch.Tensor, attn_mask: torch.Tensor, alibi: torch.Tensor, triangular: bool,
recompute: bool, local_attention: bool, window_size: int, async_op: bool, layer_scale: float,
head_offset: int):
output = self.softmax_func(attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size,
async_op, layer_scale, head_offset, self.config.mp_size)
return output
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..config import DeepSpeedInferenceConfig
from deepspeed.ops.op_builder import InferenceBuilder
class BaseOp(torch.nn.Module):
inference_module = None
def __init__(self, config: DeepSpeedInferenceConfig):
super(BaseOp, self).__init__()
self.config = config
if BaseOp.inference_module is None:
builder = InferenceBuilder()
BaseOp.inference_module = builder.load()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from typing import Optional
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class ResidualAddOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(ResidualAddOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.residual_add_func = self.inference_module.residual_add_bias_fp16
elif self.config.dtype == torch.bfloat16:
self.residual_add_func = self.inference_module.residual_add_bias_bf16
else:
self.residual_add_func = self.inference_module.residual_add_bias_fp32
self._vector_add = self.inference_module._vector_add
except AttributeError:
self.residual_add_func = None
self._vector_add = None
def forward(self,
hidden_state: torch.Tensor,
residual: torch.Tensor,
add_bias: bool,
attention_output: Optional[torch.Tensor] = None,
residual_add: Optional[torch.Tensor] = None,
attention_bias: Optional[torch.Tensor] = None,
final_bias: Optional[torch.Tensor] = None):
        if self.residual_add_func is not None:
if final_bias is None:
residual = self._vector_add(residual, hidden_state, 1.0 / self.config.mp_size)
else:
if not self.config.pre_layer_norm and residual_add is not None:
                    # only use residual_add if it is set and we are not using pre-layer-norm
residual = residual_add
self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias,
self.config.mp_size, self.config.mlp_after_attn, add_bias,
self.config.pre_layer_norm)
else:
# fallback
if os.environ.get('DS_KI_FALLBACK') == 'True' and self.config.mlp_after_attn:
if self.config.pre_layer_norm:
tmp = (residual.float() + attention_output.float() + attention_bias.float() +
final_bias.float()) / self.config.mp_size + hidden_state.float()
else:
tmp = residual.float() + hidden_state.float() + final_bias.float()
input_dtype = hidden_state.dtype
residual = tmp.to(input_dtype)
else:
raise NotImplementedError
return residual
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from ..op_builder import AsyncIOBuilder
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.op_builder import CPUAdagradBuilder
from deepspeed.utils.logging import should_log_le
class DeepSpeedCPUAdagrad(torch.optim.Optimizer):
optimizer_id = 0
def __init__(self, model_params, lr=1e-2, eps=1e-10, weight_decay=0, amsgrad=False, fp32_optimizer_states=True):
default_args = dict(lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
super(DeepSpeedCPUAdagrad, self).__init__(model_params, default_args)
self.opt_id = DeepSpeedCPUAdagrad.optimizer_id
DeepSpeedCPUAdagrad.optimizer_id = DeepSpeedCPUAdagrad.optimizer_id + 1
self.fp32_optimizer_states = fp32_optimizer_states
self.ds_opt_adagrad = CPUAdagradBuilder().load()
self.ds_opt_adagrad.create_adagrad(self.opt_id, lr, eps, weight_decay, should_log_le("info"))
def __del__(self):
# need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
# is used multiple times in the same process (notebook or pytest worker)
self.ds_opt_adagrad.destroy_adagrad(self.opt_id)
def __setstate__(self, state):
super(DeepSpeedCPUAdagrad, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None, fp16_param_groups=None):
"""Update the model parameters.
.. note::
This method will be called internally by ZeRO-Offload. DeepSpeed
users should still use ``engine.step()`` as shown in the
`Getting Started
<https://www.deepspeed.ai/getting-started/#training>`_ guide.
Args:
closure (callable, optional): closure to compute the loss.
Defaults to ``None``.
fp16_param_groups: FP16 GPU parameters to update. Performing the
copy here reduces communication time. Defaults to ``None``.
Returns:
            loss: the loss value if ``closure`` is provided, otherwise ``None``.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# intended device for step
device = torch.device('cpu')
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.grad is None:
continue
assert p.device == device, f"CPUAdagrad param is on {p.device} and must be 'cpu', make " \
"sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
state = self.state[p]
# State initialization
if len(state) == 0:
#print(f'group {group_id} param {param_id} = {p.numel()}')
state['step'] = 0
                    # use full precision for optimizer state by default unless self.fp32_optimizer_states is off
                    state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
# gradient variances
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device='cpu')
#memory_format=torch.preserve_format)
state['step'] += 1
                if p.grad.is_sparse:
sparse_param = p.sparse_mask(p.grad)
sparse_exp_avg_sq = state['exp_avg_sq'].sparse_mask(p.grad)
self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'],
group['weight_decay'], sparse_param.values(), p.grad.values(),
sparse_exp_avg_sq.values())
p[sparse_param.indices()] = sparse_param.values()
state['exp_avg_sq'][sparse_exp_avg_sq.indices()] = sparse_exp_avg_sq.values()
if fp16_param_groups is not None:
fp16_param_groups[group_id][param_id][sparse_param.indices()] = sparse_param.values()
else:
if fp16_param_groups is not None:
self.ds_opt_adagrad.adagrad_update_copy(self.opt_id, state['step'], group['lr'], group['eps'],
group['weight_decay'], p.data, p.grad.data,
state['exp_avg_sq'],
fp16_param_groups[group_id][param_id].data)
else:
self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'],
group['weight_decay'], p.data, p.grad.data,
state['exp_avg_sq'])
return loss
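# --- Illustrative usage (editor's addition): a minimal sketch of driving DeepSpeedCPUAdagrad directly.
# It assumes the CPUAdagrad op can be JIT-built on this machine; in normal training the optimizer is
# constructed for you by deepspeed.initialize() together with ZeRO-Offload. The tiny model is hypothetical.
if __name__ == "__main__":
    import torch.nn as nn
    model = nn.Linear(8, 4)  # parameters stay on the CPU, as required by this optimizer
    optimizer = DeepSpeedCPUAdagrad(model.parameters(), lr=1e-2, weight_decay=0.0)
    loss = model(torch.randn(2, 8)).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()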
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .cpu_adagrad import DeepSpeedCPUAdagrad
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .quantizer import ds_quantizer
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.op_builder import QuantizerBuilder
# Cuda modules will be imported if needed
quantizer_cuda_module = None
def ds_quantizer(input, groups=1, bit_num=8, sr=False, asym=False):
# Load cuda modules if needed
global quantizer_cuda_module
if quantizer_cuda_module is None:
quantizer_cuda_module = QuantizerBuilder().load()
if sr:
if asym:
quantize_func = quantizer_cuda_module.ds_sr_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_asym_fp32
else:
quantize_func = quantizer_cuda_module.ds_sr_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_fp32
else:
if asym:
quantize_func = quantizer_cuda_module.ds_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_asym_fp32
else:
quantize_func = quantizer_cuda_module.ds_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_fp32
return quantize_func(input, groups, bit_num)
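# --- Illustrative usage (editor's addition): a minimal sketch, assuming the Quantizer op can be built
# and a CUDA device is available. ds_quantizer picks the symmetric/asymmetric and stochastic-rounding/
# deterministic kernel from the flags and the input dtype; shapes and group count below are hypothetical.
if __name__ == "__main__":
    x = torch.randn(4, 256, dtype=torch.half, device='cuda')
    out = ds_quantizer(x.clone(), groups=4, bit_num=8)  # deterministic, symmetric quantization
    out_sr = ds_quantizer(x.clone(), groups=4, bit_num=8, sr=True)  # with stochastic rounding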
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit 6bd01c4
"""
import torch
from .multi_tensor_apply import MultiTensorApply
multi_tensor_applier = MultiTensorApply(2048 * 32)
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import FusedAdamBuilder
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused Adam implements 2 fusions.
* Fusion of the Adam update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
or ``torch.optim.Adam`` with ``adam_w_mode=False``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish to use :class:`FusedAdam` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
.. warning::
A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``. These additional arguments
are now deprecated and unnecessary.
    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
        adam_w_mode (boolean, optional): whether to apply decoupled weight decay
            (also known as AdamW) instead of L2 regularization. (default: True)
        set_grad_none (bool, optional): whether to set grad to None when the zero_grad()
            method is called. (default: True)
.. _Adam - A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
adam_w_mode=True,
weight_decay=0.,
amsgrad=False,
set_grad_none=True):
if amsgrad:
raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
super(FusedAdam, self).__init__(params, defaults)
self.adam_w_mode = 1 if adam_w_mode else 0
self.set_grad_none = set_grad_none
fused_adam_cuda = FusedAdamBuilder().load()
# Skip buffer
self._dummy_overflow_buf = get_accelerator().IntTensor([0])
self.multi_tensor_adam = fused_adam_cuda.multi_tensor_adam
def zero_grad(self):
if self.set_grad_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedAdam, self).zero_grad()
def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
"""
if any(p is not None for p in [grads, output_params, scale, grad_norms]):
raise RuntimeError(
'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.'
)
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
if len(group['params']) == 0:
continue
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
            # assume the same step across the group for now to simplify things
            # per-parameter step can easily be supported by making it a tensor, or by passing a list into the kernel
if 'step' not in group:
group['step'] = 0
# create lists for multi-tensor apply
g_16, p_16, m_16, v_16 = [], [], [], []
g_bf, p_bf, m_bf, v_bf = [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
'FusedAdam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
                    # DeepSpeed ZeRO 3 processes one subgroup at a time, so we need to keep track of the step count for each tensor separately.
                    # This is not an issue for ZeRO 1 & 2, since they apply a single optimization step to the whole param group at once.
                    # To keep backward compatibility with existing checkpoints, we use group['step'] to initialize state['step'] if it exists.
state['step'] = group.get('step', 0)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.bfloat16:
g_bf.append(p.grad)
p_bf.append(p)
m_bf.append(state['exp_avg'])
v_bf.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
                    raise RuntimeError('FusedAdam only supports fp16, bf16 and fp32.')
if len(g_16) > 0:
state['step'] += 1
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16],
group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
bias_correction, group['weight_decay'])
if len(g_bf) > 0:
state['step'] += 1
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_bf, p_bf, m_bf, v_bf],
group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
bias_correction, group['weight_decay'])
if len(g_32) > 0:
state['step'] += 1
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32],
group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
bias_correction, group['weight_decay'])
return loss
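# --- Illustrative usage (editor's addition): a minimal sketch, assuming the FusedAdam op can be built and
# an accelerator (e.g. a CUDA GPU) is available. It mirrors the torch.optim.Adam calling convention from
# the class docstring; the model and tensor shapes are hypothetical.
if __name__ == "__main__":
    import torch.nn as nn
    device = get_accelerator().device_name()
    model = nn.Linear(16, 16).to(device)
    optimizer = FusedAdam(model.parameters(), lr=1e-3, adam_w_mode=True)
    loss = model(torch.randn(4, 16, device=device)).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()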
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .cpu_adam import DeepSpeedCPUAdam
from .fused_adam import FusedAdam
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from NVIDIA/apex, commit a109f85
"""
class MultiTensorApply(object):
def __init__(self, chunk_size):
self.chunk_size = chunk_size
def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args)
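# --- Illustrative usage (editor's addition): MultiTensorApply simply forwards (chunk_size, noop_flag,
# tensor_lists, *args) to the op it is given. The stand-in `scale_op` below is a hypothetical pure-Python
# callable used only to show the calling convention; real callers pass a compiled multi-tensor CUDA kernel.
if __name__ == "__main__":
    import torch
    def scale_op(chunk_size, noop_flag, tensor_lists, scale):
        # naive reference behavior: scale every tensor in the first list in place
        for t in tensor_lists[0]:
            t.mul_(scale)
    applier = MultiTensorApply(2048 * 32)
    noop_flag = torch.zeros(1, dtype=torch.int)
    params = [torch.ones(3), torch.ones(2)]
    applier(scale_op, noop_flag, [params], 0.5)
    print(params[0])  # tensor([0.5000, 0.5000, 0.5000])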
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from cpuinfo import get_cpu_info
from deepspeed.utils import logger
from deepspeed.utils.logging import should_log_le
from deepspeed.ops.op_builder import CPUAdamBuilder
class DeepSpeedCPUAdam(torch.optim.Optimizer):
optimizer_id = 0
def __init__(self,
model_params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adamw_mode=True,
fp32_optimizer_states=True):
"""Fast vectorized implementation of two variations of Adam optimizer on CPU:
* Adam: A Method for Stochastic Optimization: (https://arxiv.org/abs/1412.6980);
* AdamW: Fixing Weight Decay Regularization in Adam (https://arxiv.org/abs/1711.05101)
        DeepSpeed CPU Adam(W) provides a 5x to 7x speedup over torch.optim.Adam(W).
        To use this optimizer, the model's master parameters (in FP32) must reside in CPU memory.
        To train on a heterogeneous system, such as coordinating CPU and GPU, DeepSpeed offers
        the ZeRO-Offload technology which efficiently offloads the optimizer states into CPU memory,
        with minimal impact on training throughput. DeepSpeedCPUAdam plays an important role in minimizing
        the overhead of the optimizer's latency on the CPU. Please refer to the ZeRO-Offload tutorial
        (https://www.deepspeed.ai/tutorials/zero-offload/) for more information on how to enable this technology.
        For calling the step function, there are two options available: (1) update the optimizer's states, or (2) update
        the optimizer's states and copy the parameters back to the GPU at the same time. We have seen that the second
        option can bring 30% higher throughput than doing the copy separately as in option one.
.. note::
We recommend using our `config
<https://www.deepspeed.ai/docs/config-json/#optimizer-parameters>`_
to allow :meth:`deepspeed.initialize` to build this optimizer
for you.
Arguments:
model_params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in DeepSpeed CPUAdam!
adamw_mode: select between Adam and AdamW implementations (default: AdamW)
            fp32_optimizer_states: creates momentum and variance in full precision regardless of
                    the precision of the parameters (default: True)
"""
default_args = dict(lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
bias_correction=bias_correction,
amsgrad=amsgrad)
super(DeepSpeedCPUAdam, self).__init__(model_params, default_args)
cpu_info = get_cpu_info()
self.cpu_vendor = cpu_info["vendor_id_raw"].lower() if "vendor_id_raw" in cpu_info else "unknown"
if "amd" in self.cpu_vendor:
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.dtype == torch.half:
logger.warning("FP16 params for CPUAdam may not work on AMD CPUs")
break
else:
continue
break
self.opt_id = DeepSpeedCPUAdam.optimizer_id
DeepSpeedCPUAdam.optimizer_id = DeepSpeedCPUAdam.optimizer_id + 1
self.adam_w_mode = adamw_mode
self.fp32_optimizer_states = fp32_optimizer_states
self.ds_opt_adam = CPUAdamBuilder().load()
self.ds_opt_adam.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode,
should_log_le("info"))
def __del__(self):
# need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
# is used multiple times in the same process (notebook or pytest worker)
self.ds_opt_adam.destroy_adam(self.opt_id)
def __setstate__(self, state):
super(DeepSpeedCPUAdam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None, fp16_param_groups=None):
"""Update the model parameters.
.. note::
This method will be called internally by ZeRO-Offload. DeepSpeed
users should still use ``engine.step()`` as shown in the
`Getting Started
<https://www.deepspeed.ai/getting-started/#training>`_ guide.
Args:
closure (callable, optional): closure to compute the loss.
Defaults to ``None``.
fp16_param_groups: FP16 GPU parameters to update. Performing the
copy here reduces communication time. Defaults to ``None``.
Returns:
            loss: the loss value if ``closure`` is provided, otherwise ``None``.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# intended device for step
device = torch.device('cpu')
        # convert the fp16 params into a list of parameter groups
if type(fp16_param_groups) is list:
if type(fp16_param_groups[0]) is not list:
fp16_param_groups = [fp16_param_groups]
elif fp16_param_groups is not None:
fp16_param_groups = [[fp16_param_groups]]
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.grad is None:
continue
assert p.device == device, f"CPUAdam param is on {p.device} and must be 'cpu', make " \
"sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
state = self.state[p]
# State initialization
if len(state) == 0:
#print(f'group {group_id} param {param_id} = {p.numel()}')
state['step'] = 0
                    # use full precision for optimizer state by default unless self.fp32_optimizer_states is off
state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
# gradient momentums
state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
#memory_format=torch.preserve_format)
# gradient variances
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
#memory_format=torch.preserve_format)
state['step'] += 1
beta1, beta2 = group['betas']
if fp16_param_groups is not None:
self.ds_opt_adam.adam_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2,
group['eps'], group['weight_decay'], group['bias_correction'],
p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'],
fp16_param_groups[group_id][param_id].data)
else:
self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
state['exp_avg'], state['exp_avg_sq'])
return loss
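# --- Illustrative usage (editor's addition): a minimal sketch, assuming the CPUAdam op can be JIT-built.
# In a real ZeRO-Offload run this optimizer is created by deepspeed.initialize() from the "optimizer"
# section of the DeepSpeed config; the tiny CPU-resident model below is hypothetical.
if __name__ == "__main__":
    import torch.nn as nn
    model = nn.Linear(8, 4)  # FP32 master parameters must live in CPU memory
    optimizer = DeepSpeedCPUAdam(model.parameters(), lr=1e-3, weight_decay=0.01, adamw_mode=True)
    loss = model(torch.randn(2, 8)).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()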
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.runtime.config_utils import get_scalar_param, get_dict_param, DeepSpeedConfigObject
from deepspeed.autotuning.constants import *
class DeepSpeedAutotuningConfig(DeepSpeedConfigObject):
def __init__(self, param_dict):
super(DeepSpeedAutotuningConfig, self).__init__()
self.enabled = None
self.start_step = None
self.end_step = None
self.metric_path = None
self.arg_mappings = None
self.metric = None
self.model_info = None
self.results_dir = None
self.exps_dir = None
self.overwrite = None
if param_dict and AUTOTUNING in param_dict.keys():
autotuning_dict = param_dict[AUTOTUNING]
else:
autotuning_dict = {}
self._initialize(autotuning_dict)
def _initialize(self, autotuning_dict):
self.enabled = get_scalar_param(autotuning_dict, AUTOTUNING_ENABLED, AUTOTUNING_ENABLED_DEFAULT)
self.fast = get_scalar_param(autotuning_dict, AUTOTUNING_FAST, AUTOTUNING_FAST_DEFAULT)
self.results_dir = get_scalar_param(autotuning_dict, AUTOTUNING_RESULTS_DIR, AUTOTUNING_RESULTS_DIR_DEFAULT)
assert self.results_dir, "results_dir cannot be empty"
self.exps_dir = get_scalar_param(autotuning_dict, AUTOTUNING_EXPS_DIR, AUTOTUNING_EXPS_DIR_DEFAULT)
assert self.exps_dir, "exps_dir cannot be empty"
self.overwrite = get_scalar_param(autotuning_dict, AUTOTUNING_OVERWRITE, AUTOTUNING_OVERWRITE_DEFAULT)
self.start_profile_step = get_scalar_param(autotuning_dict, AUTOTUNING_START_PROFILE_STEP,
AUTOTUNING_START_PROFILE_STEP_DEFAULT)
self.end_profile_step = get_scalar_param(autotuning_dict, AUTOTUNING_END_PROFILE_STEP,
AUTOTUNING_END_PROFILE_STEP_DEFAULT)
self.metric = get_scalar_param(autotuning_dict, AUTOTUNING_METRIC, AUTOTUNING_METRIC_DEFAULT)
self.metric_path = get_scalar_param(autotuning_dict, AUTOTUNING_METRIC_PATH, AUTOTUNING_METRIC_PATH_DEFAULT)
self.tuner_type = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_TYPE, AUTOTUNING_TUNER_TYPE_DEFAULT)
self.tuner_early_stopping = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_EARLY_STOPPING,
AUTOTUNING_TUNER_EARLY_STOPPING_DEFAULT)
self.tuner_num_trials = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_NUM_TRIALS,
AUTOTUNING_TUNER_NUM_TRIALS_DEFAULT)
self.arg_mappings = get_dict_param(autotuning_dict, AUTOTUNING_ARG_MAPPINGS, AUTOTUNING_ARG_MAPPINGS_DEFAULT)
self.model_info = get_model_info_config(autotuning_dict)
self.model_info_path = get_scalar_param(autotuning_dict, AUTOTUNING_MODEL_INFO_PATH,
AUTOTUNING_MODEL_INFO_PATH_DEFAULT)
self.mp_size = get_scalar_param(autotuning_dict, AUTOTUNING_MP_SIZE, AUTOTUNING_MP_SIZE_DEFAULT)
self.max_train_batch_size = get_dict_param(autotuning_dict, AUTOTUNING_MAX_TRAIN_BATCH_SIZE,
AUTOTUNING_MAX_TRAIN_BATCH_SIZE_DEFAULT)
self.min_train_batch_size = get_dict_param(autotuning_dict, AUTOTUNING_MIN_TRAIN_BATCH_SIZE,
AUTOTUNING_MIN_TRAIN_BATCH_SIZE_DEFAULT)
self.max_train_micro_batch_size_per_gpu = get_dict_param(
autotuning_dict, AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU,
AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT)
self.min_train_micro_batch_size_per_gpu = get_dict_param(
autotuning_dict, AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU,
AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT)
self.num_tuning_micro_batch_sizes = get_dict_param(autotuning_dict, AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES,
AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES_DEFAULT)
def get_model_info_config(param_dict):
if MODEL_INFO in param_dict and param_dict[MODEL_INFO] is not None:
model_info_config = {}
for key, default_value in MODEL_INFO_KEY_DEFAULT_DICT.items():
model_info_config[key] = get_scalar_param(param_dict[MODEL_INFO], key, default_value)
return model_info_config
return None
def get_default_model_info_config():
return MODEL_INFO_KEY_DEFAULT_DICT
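# --- Illustrative usage (editor's addition): a minimal sketch of how the "autotuning" section of a user's
# DeepSpeed config maps onto DeepSpeedAutotuningConfig. The dictionary is a hypothetical example; any key
# that is omitted falls back to the defaults defined in .constants.
if __name__ == "__main__":
    example_param_dict = {
        "autotuning": {
            "enabled": True,
            "fast": True,
            "metric": "throughput",
            "start_profile_step": 3,
            "end_profile_step": 5,
        }
    }
    cfg = DeepSpeedAutotuningConfig(example_param_dict)
    print(cfg.enabled, cfg.metric, cfg.results_dir, cfg.exps_dir)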
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#########################################
# autotuner implementation constants
#########################################
import os
DEFAULT_TEMPLATE_PATH_ZERO_0 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero0.json")
DEFAULT_TEMPLATE_PATH_ZERO_1 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero1.json")
DEFAULT_TEMPLATE_PATH_ZERO_2 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero2.json")
DEFAULT_TEMPLATE_PATH_ZERO_3 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero3.json")
METRIC_PERCENT_DIFF_CONST = 0.05
DS_CONFIG = "ds_config"
BUFSIZE = 1 # line buffer size for writing files
#########################################
# autotuner configuration constants
#########################################
# Autotuner. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
AUTOTUNING_FORMAT = """
autotuner should be enabled as:
"session_params": {
"autotuning": {
"enabled": true,
"start_step": 5,
"end_step": 15
}
}
"""
AUTOTUNING = "autotuning"
AUTOTUNING_ENABLED = "enabled"
AUTOTUNING_ENABLED_DEFAULT = False
AUTOTUNING_FAST = "fast"
AUTOTUNING_FAST_DEFAULT = True
AUTOTUNING_RESULTS_DIR = "results_dir"
AUTOTUNING_RESULTS_DIR_DEFAULT = "autotuning_results"
AUTOTUNING_EXPS_DIR = "exps_dir"
AUTOTUNING_EXPS_DIR_DEFAULT = "autotuning_exps"
AUTOTUNING_OVERWRITE = "overwrite"
AUTOTUNING_OVERWRITE_DEFAULT = True
AUTOTUNING_START_PROFILE_STEP = "start_profile_step"
AUTOTUNING_START_PROFILE_STEP_DEFAULT = 3
AUTOTUNING_END_PROFILE_STEP = "end_profile_step"
AUTOTUNING_END_PROFILE_STEP_DEFAULT = 5
AUTOTUNING_METRIC_PATH = "metric_path"
AUTOTUNING_METRIC_PATH_DEFAULT = None
AUTOTUNING_TUNER_TYPE = "tuner_type"
AUTOTUNING_TUNER_GRIDSEARCH = "gridsearch"
AUTOTUNING_TUNER_RANDOM = "random"
AUTOTUNING_TUNER_MODELBASED = "model_based"
AUTOTUNING_TUNER_TYPE_DEFAULT = AUTOTUNING_TUNER_GRIDSEARCH
AUTOTUNING_TUNER_EARLY_STOPPING = "tuner_early_stopping"
AUTOTUNING_TUNER_EARLY_STOPPING_DEFAULT = 5
AUTOTUNING_TUNER_NUM_TRIALS = "tuner_num_trials"
AUTOTUNING_TUNER_NUM_TRIALS_DEFAULT = 50
AUTOTUNING_ARG_MAPPINGS = "arg_mappings"
AUTOTUNING_ARG_MAPPINGS_DEFAULT = None
AUTOTUNING_MAX_TRAIN_BATCH_SIZE = "max_train_batch_size"
AUTOTUNING_MAX_TRAIN_BATCH_SIZE_DEFAULT = None
AUTOTUNING_MIN_TRAIN_BATCH_SIZE = "min_train_batch_size"
AUTOTUNING_MIN_TRAIN_BATCH_SIZE_DEFAULT = 1
AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "max_train_micro_batch_size_per_gpu"
AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1024
AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "min_train_micro_batch_size_per_gpu"
AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1
AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES = "num_tuning_micro_batch_sizes"
AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES_DEFAULT = 3
AUTOTUNING_MP_SIZE = "mp_size"
AUTOTUNING_MP_SIZE_DEFAULT = 1
AUTOTUNING_METRIC = "metric"
AUTOTUNING_METRIC_LATENCY = "latency"
AUTOTUNING_METRIC_THROUGHPUT = "throughput"
AUTOTUNING_METRIC_FLOPS = "flops"
AUTOTUNING_METRIC_FORWARD = "forward"
AUTOTUNING_METRIC_BACKWRAD = "flops"
AUTOTUNING_METRIC_STEPS = "step"
AUTOTUNING_METRIC_DEFAULT = AUTOTUNING_METRIC_THROUGHPUT
#########################################
# MODEL INFO
#########################################
AUTOTUNING_MODEL_INFO_PATH = "model_info_path"
AUTOTUNING_MODEL_INFO_PATH_DEFAULT = None
MODEL_INFO_FORMAT = '''
"model_info": {
"num_params": 1000000000,
"hidden_size": 10,
"num_layers": 12,
}
'''
MODEL_INFO = "model_info"
MODEL_INFO_PROFILE = "profile"
MODEL_INFO_PROFILE_DEFAULT = False
MODEL_INFO_NUM_PARAMS = "num_params"
MODEL_INFO_NUM_PARAMS_DEFAULT = None
MODEL_INFO_HIDDEN_SIZE = "hidden_size"
MODEL_INFO_HIDDEN_SIZE_DEFAULT = None
MODEL_INFO_NUM_LAYERS = "num_layers"
MODEL_INFO_NUM_LAYERS_DEFAULT = None
MODEL_INFO_KEY_DEFAULT_DICT = {
MODEL_INFO_PROFILE: MODEL_INFO_PROFILE_DEFAULT,
MODEL_INFO_NUM_PARAMS: MODEL_INFO_NUM_PARAMS_DEFAULT,
MODEL_INFO_HIDDEN_SIZE: MODEL_INFO_HIDDEN_SIZE_DEFAULT,
MODEL_INFO_NUM_LAYERS: MODEL_INFO_NUM_LAYERS_DEFAULT
}
#########################################
# autotuner search space constants
#########################################
DEFAULT_HF_CONFIG = {
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"gradient_accumulation_steps": "auto",
}
DEFAULT_MIN_MEM_CONFIG = {
"train_micro_batch_size_per_gpu": 1,
"zero_optimization": {
"stage": 3
},
"memory_break_down": False
}
DEFAULT_TUNING_SPACE_ZERO_0 = {"zero_optimization": {"stage": 0}}
DEFAULT_TUNING_SPACE_ZERO_1 = {
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": [5e7, 5e8, 1e9],
"allgather_bucket_size": [5e7, 5e8, 1e9],
}
}
DEFAULT_TUNING_SPACE_ZERO_2 = {
"zero_optimization": {
"stage": 2,
"overlap_comm": [True, False],
"reduce_scatter": [False, True],
"reduce_bucket_size": [5e7, 5e8, 1e9],
"allgather_bucket_size": [5e7, 5e8, 1e9],
"contiguous_gradients": [False, True]
},
}
DEFAULT_TUNING_SPACE_ZERO_3 = {
"zero_optimization": {
"stage": 3,
"overlap_comm": [True, False],
"reduce_scatter": [False, True],
"reduce_bucket_size": [5e7, 5e8, 1e9],
"allgather_partitions": [True, False],
"allgather_bucket_size": [5e7, 5e8, 1e9],
"contiguous_gradients": [False, True]
},
}
GLOBAL_TUNING_SPACE = 'global'
# TUNING_MICRO_BATCH_SIZE_PREFIX="tune_micro_batch_size_z"
TUNING_MICRO_BATCH_SIZE_PREFIX = "z"
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .autotuner import Autotuner
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
def search_error(filename):
if not os.path.exists(filename):
return "stderr.log does not exist"
with open(filename) as f:
for line in f:
for s in ["Error", "error", "ERROR"]:
idx = line.find(s)
if idx != -1:
return line[idx + len(s):].lstrip(": ")
return None
def was_interruptted(filename):
if not os.path.exists(filename):
return "stderr.log does not exist"
with open(filename) as f:
for line in f:
s = "KeyboardInterrupt"
idx = line.find(s)
if idx != -1:
return True
return False
def find_replace_str(value, replace_dict):
if not isinstance(value, str):
return str(value)
matches = re.findall(r"\$[A-Za-z0-9_]+", value)
for var in matches:
var_key = var.replace("$", "").lower()
if var_key == "nvme_path":
continue
assert var_key in replace_dict, f"unknown var key: {var_key}, in {replace_dict}"
if isinstance(replace_dict[var_key], str):
value = value.replace(var, replace_dict[var_key])
else:
assert len(matches) == 1, "unable to replace multiple non-string matches"
value = replace_dict[var_key]
return value
def find_replace(target, replace_dict):
if isinstance(target, dict):
for key, value in target.items():
if isinstance(value, str):
target[key] = find_replace_str(value, replace_dict)
if isinstance(value, list):
for i in range(len(value)):
value[i] = find_replace_str(value[i], replace_dict)
if isinstance(value, dict):
find_replace(value, replace_dict)
elif isinstance(target, list):
for i in range(len(target)):
target[i] = str(find_replace_str(target[i], replace_dict))
def get_list(val):
if not isinstance(val, list):
return [val]
else:
return val
def combine_dict(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = combine_dict(d.get(k, {}), v)
else:
if k not in d:
d[k] = v
else:
if not isinstance(d[k], list):
d[k] = [d[k]]
d[k].extend(i for i in get_list(v) if i not in d[k])
return d
def del_if_exists(t, d):
"""Deletes a key from a dictionary if it exists.
Args:
t (string): target key to delete
d (dict): dictionary to delete from
"""
if t in d:
del d[t]
return
for k, v in d.items():
if isinstance(v, collections.abc.Mapping):
del_if_exists(t, v)
def replace_dict(d, u, ignored_keys=[]):
"""Replaces values in dict d with values in dict u.
Args:
d (dict): the target dict to overwrite
u (dict): the dict containing the values to overwrite the target dict
Returns:
dict d with values overwritten by the corresponding ones in dict u.
"""
if u is not None:
for k, v in u.items():
if k not in ignored_keys:
if v is None:
del_if_exists(k, d)
continue
if isinstance(v, collections.abc.Mapping):
d[k] = replace_dict(d.get(k, {}), v, ignored_keys)
else:
d[k] = v
return d
def get_val_by_key(d: dict, k):
if k in d:
return d[k]
    for v in d.values():
        if isinstance(v, dict):
            val = get_val_by_key(v, k)
            if val is not None:
                return val
    return None
def set_val_by_key(d: dict, k, vv):
if k in d:
d[k] = vv
for v in d.values():
if isinstance(v, dict):
set_val_by_key(v, k, vv)
def fetch_hostfile(hostfile_path):
if not os.path.isfile(hostfile_path):
logger.warning("Unable to find hostfile, will proceed with training "
"with local resources only.")
return None
# e.g., worker-0 slots=16
with open(hostfile_path, 'r') as fd:
resource_pool = collections.OrderedDict()
for line in fd.readlines():
line = line.strip()
if line == '':
# skip empty lines
continue
try:
hostname, slots = line.split()
_, slot_count = slots.split("=")
slot_count = int(slot_count)
except ValueError as err:
logger.error("Hostfile is not formatted correctly, unable to "
"proceed with training.")
raise err
if hostname in resource_pool:
logger.error("Hostfile contains duplicate hosts, unable to "
"proceed with training.")
raise ValueError("host {} is already defined".format(hostname))
resource_pool[hostname] = slot_count
return resource_pool
def validate_ds_config(config: dict):
def is_False(config: dict, key):
if config is None:
return False
return bool(config.get(key))
config_zero = config.get("zero_optimization", {})
if not config_zero:
return True
stage = config_zero.get("stage")
offload = False
if stage == 1:
return True
elif stage == 2:
if is_False(config_zero, "cpu_offload") and is_False(config_zero, "cpu_offload_params"):
return False
elif stage == 3:
offload_devices = ["cpu", "nvme"]
if config_zero.get("offload_optimizer", {}).get("device") in offload_devices:
offload = True
if config_zero.get("offload_param", {}).get("device") in offload_devices:
offload = True
else:
return True
# HF requires that "ZeRO Offload can only work with DeepSpeed optimizers"
if offload and not config.get("optimizer"):
return False
return True
def remove_dupe_dicts(l):
""" Removes duplicate dictionaries from a list. Uses list comprehension and the json library to sort and stringify each dictionary and the set data type to ensure unique values. Works with nested data structures.
Args:
l (list): a list of (nested) data structures.
Returns:
A list of unique values.
"""
list_of_strings = [json.dumps(d, sort_keys=True) for d in l]
list_of_strings = set(list_of_strings)
return [json.loads(s) for s in list_of_strings]
def prune_config(config, ignored_keys=[]):
""" Prunes the input configurations
Args:
configs (dict): A configuration dictionary.
ignored_keys (list, optional): the keys of the sections to delete. Defaults to [].
Returns:
A configuration dictionary.
"""
if ignored_keys:
for k in ignored_keys:
def find_del_key(d: dict, k: str):
if k in d:
del d[k]
else:
for dd in d.values():
if isinstance(dd, dict):
find_del_key(dd, k)
find_del_key(config, k)
def prune_configs(configs, ignored_keys=[]):
""" Prunes the input list of configurations
Args:
configs (list): A list of configuration dictionaries.
ignored_keys (list, optional): the keys of the sections to delete. Defaults to [].
Returns:
A list of valid and unique configuration dictionaries.
"""
pruned_list = []
for config in configs:
prune_config(config, ignored_keys)
pruned_list.append(config)
return remove_dupe_dicts(pruned_list)
def get_tuning_keys(tuning_space: dict):
"""Outputs the list of tunable parameters in the tuning space dict.
Args:
tuning_space (dict): a configuration dictionary containing tunable parameters as lists of values.
Returns:
A list of strings
"""
tuning_keys = []
for key, val in tuning_space.items():
if isinstance(val, dict):
tuning_keys.extend(get_tuning_keys(val))
if isinstance(val, list) and len(val) > 1:
tuning_keys.append(key)
return tuning_keys
def get_all_configs(tuning_space: dict, ignore_keys=None):
""" Splits the tuning space dictionary to result in all combinations of values.
Args:
tuning_space (dict): the tuning space where tunable parameters are lists of values.
"""
def gen_combinations(d: dict):
keys, values = d.keys(), d.values()
for v in values:
if not isinstance(v, list):
v = [v]
values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values)
for comb in itertools.product(*values_choices):
yield dict(zip(keys, comb))
all_configs = []
ignored_key_vals = {}
    for ik in (ignore_keys or []):
ignored_key_vals[ik] = tuning_space.get(ik, {})
del_if_exists(ik, tuning_space)
for c in gen_combinations(tuning_space):
replace_dict(c, ignored_key_vals)
all_configs.append(c)
return all_configs
def canonical_name(config: dict, tuning_keys=None, prefix="", omit_val=False):
""" Generates a name from the acronyms of the tuning keys in the config dict. TRAIN_MICRO_BATCH_SIZE_PER_GPU is always included in the tuning keys.
Args:
config (dict): the config dict used to generate the name
tuning_keys (list, optional): the tuning keys used to generate the name. Defaults to None.
prefix (str, optional): a string added to the beginning of the name. Defaults to None.
"""
if TRAIN_MICRO_BATCH_SIZE_PER_GPU not in tuning_keys:
tuning_keys.append(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
if GRADIENT_ACCUMULATION_STEPS not in tuning_keys:
tuning_keys.append(GRADIENT_ACCUMULATION_STEPS)
tuning_keys.sort()
def get_offload_name(offload_config):
cname = ""
if offload_config is None:
return "None_"
for key, val in offload_config.items():
key = "".join(map(lambda c: c[0], key.split('_')))
if (isinstance(val, int) or isinstance(val, float)) and val > 9000:
cname += key + '{:.1e}'.format(val) + "_"
else:
if isinstance(val, bool):
val = "T" if val else "F"
cname += f"{key}{val}_"
return cname
def get_name_by_keys(config: dict, tuning_keys=None, omit_val=False):
cname = ""
if not tuning_keys or config is None:
return cname
for key, val in config.items():
# skip the arg_mappings section when naming the exp file
if key == "arg_mappings":
continue
if key == "offload_param":
cname += "op_"
if not omit_val:
cname += get_offload_name(val)
continue
if key == "offload_optimizer":
cname += "oo_"
if not omit_val:
cname += get_offload_name(val)
continue
# recursively call the func to get name for the child dicts
if isinstance(val, dict):
n = get_name_by_keys(val, tuning_keys, omit_val=omit_val)
if n != "":
cname += n + "_"
if tuning_keys and key not in tuning_keys:
continue
key_str = "".join(map(lambda c: c[0], key.split('_')))
if not omit_val:
if (isinstance(val, int) or isinstance(val, float)) and val > 9000:
cname += key_str + '{:.1e}'.format(val) + "_"
else:
if isinstance(val, bool):
val = "T" if val else "F"
cname += f"{key_str}{val}_"
else:
cname += key_str + "_"
return cname[:-1]
name = get_name_by_keys(config, tuning_keys, omit_val=omit_val)
return prefix + (name if name != "" else "exp")
def get_first_config(config: dict):
if not config:
return None
cfg = copy.deepcopy(config)
for key, val in cfg.items():
if isinstance(val, dict):
if key == "optimizer": # use user defined optimizer which might have lists of values as params
cfg[key] = val
else:
cfg[key] = get_first_config(val)
if isinstance(val, list) and len(val) > 0:
cfg[key] = val[0]
return cfg
def write_experiments(exps: list, exps_dir: str):
exp_paths = []
for exp in exps:
exp_name = exp['name']
# write the expr config to a json file
exp_path = os.path.join(exps_dir, f'{exp_name}.json')
with open(exp_path, 'w') as fd:
json.dump(exp, fd)
exp_paths.append(exp_path)
return exp_paths
def memory_to_string(n, postfix="", units=None, precision=2):
if units is None:
if n // 10**12 > 0:
return str(round(n / 1024**4, precision)) + " T" + postfix
if n // 10**9 > 0:
return str(round(n / 1024**3, precision)) + " G" + postfix
elif n // 10**6 > 0:
return str(round(n / 1024**2, precision)) + " M" + postfix
elif n // 10**3 > 0:
            return str(round(n / 1024, precision)) + " K" + postfix
else:
return str(n) + " "
else:
if units == "T":
return str(round(n / 1024**4, precision)) + " " + units
if units == "G" + postfix:
return str(round(n / 1024**3, precision)) + " " + units
elif units == "M" + postfix:
return str(round(n / 1024**2, precision)) + " " + units
elif units == "K" + postfix:
return str(round(n / 1024, precision)) + " " + units
else:
return str(n) + " "
def number_to_string(n, postfix="", units=None, precision=2):
if units is None:
if n // 10**9 > 0:
return str(round(n / 1000**3, precision)) + " B" + postfix
if n // 10**6 > 0:
return str(round(n / 1000**2, precision)) + " M" + postfix
elif n // 10**3 > 0:
return str(round(n / 1000**1, precision)) + " K" + postfix
else:
return str(n) + " "
else:
if units == "B" + postfix:
return str(round(n / 1000**3, precision)) + " " + units
elif units == "M" + postfix:
return str(round(n / 1000**2, precision)) + " " + units
elif units == "K" + postfix:
return str(round(n / 1000**1, precision)) + " " + units
else:
return str(n) + " "
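# --- Illustrative usage (editor's addition): a small, hypothetical tuning space expanded into concrete
# experiment configs and named with canonical_name(). It only exercises helpers defined in this file.
if __name__ == "__main__":
    space = {
        "train_micro_batch_size_per_gpu": [1, 2],
        "zero_optimization": {
            "stage": 2,
            "overlap_comm": [True, False],
        },
    }
    keys = get_tuning_keys(space)  # -> ['train_micro_batch_size_per_gpu', 'overlap_comm']
    configs = get_all_configs(copy.deepcopy(space), ignore_keys=[])  # 4 combinations
    for cfg in configs:
        print(canonical_name(cfg, tuning_keys=list(keys), prefix="z2_"), cfg)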
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import copy
import json
import subprocess
import sys
import threading
import time
import base64
import os
import hjson
from tqdm import tqdm
from ..utils import logger
from .constants import AUTOTUNING, AUTOTUNING_METRIC_PATH, BUFSIZE
from .utils import get_val_by_key, search_error, was_interruptted
"""
thread-0: loop over experiment queue dispatching experiments if they become available
thread-N: start each experiment in its own thread
"""
from deepspeed import comm as dist
TIMEOUT = 5
class ResourceManager:
def __init__(self, args, hosts, num_gpus_per_node, results_dir, exps_dir, arg_mappings):
self.results_dir = results_dir
self.exps_dir = exps_dir
self.nodes = []
self.num_gpus_per_node = num_gpus_per_node
for host in hosts:
self.nodes.append(Node(host, num_gpus_per_node))
self.experiment_queue = []
self.running_experiments = {}
self.finished_experiments = {}
self.experiment_count = 0
self.exp_paths = set()
self.args = args
self.arg_mappings = {}
if arg_mappings is not None:
for k, v in arg_mappings.items():
k = k.strip()
v = v.strip()
if k not in self.arg_mappings:
self.arg_mappings[k] = v
def schedule_experiments(self, exp_paths):
for exp_path in exp_paths:
if exp_path in self.exp_paths:
continue
else:
self.exp_paths.add(exp_path)
with open(exp_path, "r") as fd:
exp = hjson.load(fd)
exp["exp_id"] = self.experiment_count
self.experiment_count += 1
result_dir = exp["result_dir"] = os.path.join(self.results_dir, exp['name'])
if AUTOTUNING in exp["ds_config"]:
metric_file = os.path.join(result_dir, "metrics.json")
exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH] = metric_file
stderr_file = os.path.join(result_dir, "stderr.log")
model_info_file = os.path.join(result_dir, "model_info.json")
metric_file = os.path.join(result_dir, "metrics.json")
# skip existing experiments (except for the ones that were interrupted)
if os.path.exists(result_dir) and os.path.exists(stderr_file):
if not was_interruptted(stderr_file):
err = search_error(stderr_file)
exp_id = exp["exp_id"]
self.finished_experiments[exp_id] = (exp, err)
if err or os.path.exists(metric_file) or os.path.exists(model_info_file):
logger.info(f"Skipping exp {exp['name']} whose result already exists")
continue
self.experiment_queue.append(exp)
def run_job(self, exp: dict, reservations):
exp_id = exp["exp_id"]
exp["master_port"] = self.args.master_port + exp_id
exp["result_dir"] = os.path.join(self.results_dir, exp['name'])
user_script = self.args.user_script
user_args = self.args.user_args
# overwrite the user arg in the arg_mappings
for key, val in self.arg_mappings.items():
nval = get_val_by_key(exp, key)
if nval and str(nval) != "auto":
if val in user_args:
idx = user_args.index(val)
user_args[idx + 1] = str(nval)
else:
user_args.append(val)
user_args.append(str(nval))
t = threading.Thread(target=run_experiment, args=(exp, reservations, user_script, user_args))
t.start()
self.running_experiments[exp_id] = (t, exp, reservations, time.time())
def experiment_check(self, pbar):
finished_exps = []
for exp_id, exp_data in self.running_experiments.items():
thread, exp_json, reservations, start_time = exp_data
logger.debug(f"Checking exp_id = {exp_id}, alive = {thread.is_alive()}")
thread.join(timeout=TIMEOUT)
if not thread.is_alive():
exp_dir = exp_json["result_dir"]
stderr_file = os.path.join(exp_dir, "stderr.log")
err = search_error(stderr_file)
finished_exps.append((exp_id, reservations))
self.finished_experiments[exp_id] = (exp_json, err)
duration = time.time() - start_time
logger.debug(f"Finished exp_id = {exp_id}, duration={duration:.2f} sec")
pbar.update(len(finished_exps))
for exp_id, reservations in finished_exps:
for reservation in reservations:
reservation.restore_slots()
self.running_experiments.pop(exp_id)
time.sleep(TIMEOUT)
def resource_request(self, exp):
num_gpus, num_nodes = exp['num_gpus'], exp['num_nodes']
slot_request = num_gpus
reservations = []
for node in self.nodes:
if num_nodes == 0:
break
slots = node.reserve_slots(slot_request=slot_request)
if slots:
reservations.append(Reservation(node=node, slots=slots))
num_nodes -= 1
if num_nodes == 0:
# request satisfied
return reservations
else:
# request not satisfied
for reservation in reservations:
reservation.restore_slots()
def status(self):
status = ""
for node in self.nodes:
status += f"{node.host} ({len(node.idle_slots)} idle gpus), "
return status[:-1]
def run(self):
pbar = tqdm(total=len(self.experiment_queue))
while len(self.experiment_queue) > 0:
exp = self.experiment_queue.pop(0)
logger.debug(f'Popped exp_id = {exp["exp_id"]} from the queue')
logger.debug(f'Resource status: {self.status()}')
reservations = self.resource_request(exp)
if not reservations:
logger.debug(f'Unable to schedule exp_id = {exp["exp_id"]}')
self.experiment_queue.insert(0, exp)
logger.debug(f'Put exp_id = {exp["exp_id"]} back into the queue')
self.experiment_check(pbar)
else:
desc = ""
for reservation in reservations:
reservation.slots.sort()
slots = ",".join(map(str, reservation.slots))
desc += f"{reservation.node.host}:{slots}@"
desc = desc[:-1]
logger.debug(f'Running exp_id = {exp["exp_id"]} on {desc}')
self.run_job(exp, reservations)
# All pending experiments are scheduled, waiting for them to complete
while len(self.running_experiments) > 0:
self.experiment_check(pbar)
def save_exp_results_to_database(self, message, ranks=None, path=None):
"""Print message when one of following condition meets
+ not dist.is_initialized()
+ dist.get_rank() in ranks if ranks is not None or ranks = [-1]
Args:
message (str)
ranks (list)
path (str)
"""
should_log = not dist.is_initialized()
ranks = ranks or []
my_rank = dist.get_rank() if dist.is_initialized() else -1
if ranks and not should_log:
should_log = ranks[0] == -1
should_log = should_log or (my_rank in set(ranks))
logger.debug(f"*** Should log: {should_log}")
if should_log:
message['rank'] = my_rank
with open(path, 'a') as outfile:
json.dump(message, outfile)
outfile.write('\n')
def parse_results(self, metric):
""" Parses the metric file of the finished experiments to select the optimal DeepSpeed configuration.
Args:
finished_experiments (dcit): a dictionary of experiment id and experiment description.
Returns:
The path to the result folder of the experiment with the optimal configuration.
"""
max_throughput = sys.float_info.min
best_exp_id = -1
for exp_id, (exp, err) in self.finished_experiments.items():
if err:
logger.info(
f"The experiment exp_id = {exp_id}, exp_name = {exp['name']}, did not run successfully with error = {err}, thus a metrics.txt does not exist for it. Check the stderr.log in {exp['result_dir']}"
)
continue
metric_file = exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH]
if os.path.exists(metric_file):
with open(metric_file, 'r') as f:
results = hjson.load(f)
curr_throughput = results[metric]
if curr_throughput > max_throughput:
max_throughput = curr_throughput
best_exp_id = exp_id
exp['results'] = results
if best_exp_id != -1:
best_exp, _ = self.finished_experiments[best_exp_id]
return best_exp, max_throughput
return exp, None
def clear(self):
"""Clear experiment queues, does not reset self.experiment_count
"""
self.experiment_queue = []
# clean up the running experiments
for exp_id, exp_data in self.running_experiments.items():
thread, exp_json, reservations, start_time = exp_data
clean_up(exp_json, reservations)
self.running_experiments = {}
self.finished_experiments = {}
self.exp_paths = set()
class Node:
def __init__(self, host, max_slots):
self.host = host
self.max_slots = max_slots
self.idle_slots = list(range(max_slots))
def reserve_slots(self, slot_request: int) -> list:
if len(self.idle_slots) >= slot_request:
return [self.idle_slots.pop(0) for _ in range(slot_request)]
def restore_slots(self, slots: list):
self.idle_slots += slots
class Reservation:
def __init__(self, node, slots):
self.node = node
self.slots = slots
def restore_slots(self):
self.node.restore_slots(self.slots)
def desc(self):
slots = ",".join(map(str, self.slots))
return f"{self.node.host}:{slots}@"
def get_job_id():
# Infrastructure-specific job-id
infra_job_id = None
if "DLWS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLWS_JOB_ID"]
elif "DLTS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLTS_JOB_ID"]
else:
infra_job_id = "unknown-job-id"
return infra_job_id
def get_user():
user = None
if "USER" in os.environ:
user = os.environ["USER"]
else:
user = "unknown-user"
return user
def run_experiment(exp: dict, reservations, user_script, user_args):
include_str = ""
for reservation in reservations:
reservation.slots.sort()
slots = ",".join(map(str, reservation.slots))
include_str += f"{reservation.node.host}:{slots}@"
include_str = include_str[:-1]
master_port = exp["master_port"]
exp["launcher_args"] = [
"--include",
f"{include_str}",
"--master_port",
str(master_port),
]
logger.debug(f'launcher args={exp["launcher_args"]}')
exp["user"] = get_user()
exp["job_id"] = get_job_id()
exp_dir = exp["result_dir"]
os.makedirs(exp_dir, exist_ok=True)
ds_config_path = os.path.join(exp_dir, "ds_config.json")
exp["ds_config_path"] = ds_config_path
ds_config = copy.deepcopy(exp["ds_config"])
ds_config_json = json.dumps(ds_config).encode('utf-8')
exp["ds_config_base64"] = base64.urlsafe_b64encode(ds_config_json).decode('utf-8')
with open(exp["ds_config_path"], "w", buffering=BUFSIZE) as fd:
json.dump(ds_config, fd)
fd.flush()
os.fsync(fd)
path = exp["ds_config_path"]
logger.info(f"Scheduler wrote ds_config to {path}, {os.path.abspath(path)}")
with open(os.path.join(exp_dir, "exp.json"), "w", buffering=BUFSIZE) as fd:
json.dump(exp, fd)
fd.flush()
os.fsync(fd)
path = os.path.join(exp_dir, "exp.json")
logger.info(f"Scheduler wrote exp to {path}, {os.path.abspath(path)}")
# remove "--deepspeed_config ds_config.json" from user_args
if user_args:
if "--deepspeed_config" in user_args:
idx = user_args.index("--deepspeed_config")
# "--deepspeed_config" is omitted in HF
elif "--deepspeed" in user_args:
idx = user_args.index("--deepspeed")
        assert idx + 1 < len(user_args), "there is no ds_config file specified after --deepspeed_config or --deepspeed"
# user_args[idx + 1] = exp["ds_config_path"]
# pass base64 serialized ds_config to launcher
user_args[idx + 1] = exp["ds_config_base64"]
exp["user_script"] = user_script
exp["user_args"] = user_args
cmd = ["deepspeed"] + exp["launcher_args"] + [user_script] + user_args
assert len(exp["launcher_args"]) > 0, "must provide launcher args"
with open(os.path.join(exp_dir, "cmd.txt"), "w", buffering=BUFSIZE) as fd:
fd.write(" ".join(cmd))
fd.write("\n")
fd.flush()
os.fsync(fd)
logger.info(
f"Launching exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}, and ds_config = {os.path.abspath(ds_config_path)}"
)
with open(os.path.join(exp_dir, "stdout.log"), "wb") as out, open(os.path.join(exp_dir, "stderr.log"),
"wb") as err:
result = subprocess.Popen(cmd, stdout=out, stderr=err)
result.wait()
out.flush()
err.flush()
os.fsync(out)
os.fsync(err)
clean_up(exp, reservations)
logger.info(f"Done running exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}")
PDSH_MAX_FAN_OUT = 1024
def clean_up(exp: dict, reservations):
env = os.environ.copy()
env['PDSH_RCMD_TYPE'] = 'ssh'
nodes_str = ""
for reservation in reservations:
nodes_str += f"{reservation.node.host},"
nodes_str = nodes_str[:-1]
logger.debug(f"Cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}")
# PDSH flags for max node fan out and specific hosts to launch on
# See https://linux.die.net/man/1/pdsh for flag details
pdsh_cmd = ['pdsh', '-f', str(PDSH_MAX_FAN_OUT), '-w', nodes_str]
kill_cmd = [
'pkill',
'-f',
exp['name'],
]
cmd = pdsh_cmd + kill_cmd
logger.debug("cmd = {}".format(' '.join(cmd)))
result = subprocess.Popen(cmd, env=env)
result.wait()
    # In case of failure we must propagate the error condition back to the caller (usually the shell). The
    # actual error and traceback should have been printed in the subprocess, so in order to avoid
    # unnecessary noise we just quietly exit here with the same code as the subprocess.
if result.returncode > 0:
sys.exit(result.returncode)
logger.info(f"Done cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}")
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import shutil
import subprocess
import time
import datetime
import math
import hjson
from ..runtime.config_utils import dict_raise_error_on_duplicate_keys
from ..runtime.constants import *
from ..runtime.zero.config import ZERO_OPTIMIZATION, ZeroStageEnum
from ..utils import logger
from .config import DeepSpeedAutotuningConfig
from .constants import *
from .scheduler import ResourceManager
from .tuner import GridSearchTuner, RandomTuner, ModelBasedTuner
from .utils import *
from deepspeed.accelerator import get_accelerator
try:
from tabulate import tabulate
except ImportError:
tabulate = None
try:
import mlflow
has_mlflow = True
except Exception as e:
has_mlflow = False
ZERO_OPTIMIZATION_STAGE = "stage"
OFFLOAD_OPTIMIZER = "offload_optimizer"
OFFLOAD_PARAM = "offload_param"
ZERO_OPTIMIZATION_STAGE_DEFAULT = ZeroStageEnum.disabled
class Autotuner:
"""The DeepSpeed Autotuner automatically discovers the optimal DeepSpeed configuration that delivers good training speed. The Autotuner uses model information, system information, and heuristics to efficiently tune system knobs that affect compute and memory efficiencies, such as ZeRO optimization stages, micro-batch sizes, and many other ZeRO optimization configurations. It not only reduces the time and resources user spend on tuning, but also can discover configurations better than hand-tuned methods.
Autotuning with DeepSpeed requires no code change from DeepSpeed users. Please refer to the README for usage details.
"""
def __init__(self, args, active_resources):
self.args = args
self.selected_exp_dir = None
assert tabulate is not None, "Missing required package `tabulate`, please install with `pip install deepspeed[autotuning]`."
logger.debug(f"autotuning args={args}")
self.user_config = self._get_user_config(args.user_args)
assert self.user_config is not None, "DeepSpeed configuration is not provided"
self.autotuning_config = DeepSpeedAutotuningConfig(self.user_config)
if self.user_config[AUTOTUNING]:
if AUTOTUNING_EXPS_DIR in self.user_config[AUTOTUNING].keys():
del self.user_config[AUTOTUNING][AUTOTUNING_EXPS_DIR]
if AUTOTUNING_RESULTS_DIR in self.user_config[AUTOTUNING].keys():
del self.user_config[AUTOTUNING][AUTOTUNING_RESULTS_DIR]
self.exps_dir = self.autotuning_config.exps_dir
if self.autotuning_config.overwrite and os.path.exists(self.exps_dir):
shutil.rmtree(self.exps_dir, ignore_errors=True)
if not os.path.exists(self.exps_dir):
try:
os.makedirs(self.exps_dir, exist_ok=True)
logger.info(f"Created autotuning experiments directory: {self.exps_dir}")
except:
logger.error(
f"Failed to create {self.exps_dir}, please check `exps_dir` in the autotuning config file is accessible by all the nodes in the job."
)
exit(-1)
self.results_dir = self.autotuning_config.results_dir
if self.autotuning_config.overwrite and os.path.exists(self.results_dir):
shutil.rmtree(self.results_dir, ignore_errors=True)
if not os.path.exists(self.results_dir):
try:
os.makedirs(self.results_dir, exist_ok=True)
logger.info(f"Created autotuning results directory: {self.exps_dir}")
            except OSError:
logger.error(
f"Failed to create {self.results_dir}, please check `results_dir` in the autotuning config file is accessible by all the nodes in the job."
)
exit(-1)
# set the active resource for the autotuner resource manager
self.rm = self._get_resource_manager(active_resources)
# get resource requirement for each autotuning experiment
self.exp_num_nodes, self.exp_num_gpus = self._get_exp_resources(args)
        assert self.exp_num_gpus <= self.rm.num_gpus_per_node, "the --num_gpus value in the train script (if any) must not exceed the number of GPUs available per node"
        assert self.exp_num_nodes <= len(
            self.rm.nodes
        ), "the --num_nodes value in the train script (if any) must not exceed the number of nodes available to the job"
self.records = {}
self.optimal_cmd = None
self.optimal_ds_config = None
self.mlflow_parent_id = None
def print_tuning_results(self):
"""Print the autotuning results in tabular format.
"""
best_space_records = self.get_best_space_records()
tab = []
if best_space_records:
for key, val in best_space_records.items():
if not val:
continue
row = []
row.append(key)
num_exps = 0
if key == GLOBAL_TUNING_SPACE:
cnt = 0
for k, v in best_space_records.items():
if k != GLOBAL_TUNING_SPACE:
cnt += v[2]
num_exps = cnt
else:
num_exps = val[2]
row.append(num_exps)
row.append(val[1])
row.append(val[0]['name'])
tab.append(row)
summary = tabulate(tab,
headers=["tuning_space", "num_experiments", "best_metric_val", "best_exp_name"],
tablefmt="pipe")
print(summary)
with open(os.path.join(self.results_dir, 'summary.txt'), 'w', buffering=BUFSIZE) as fd:
fd.write(summary)
fd.flush()
os.fsync(fd)
if GLOBAL_TUNING_SPACE in best_space_records:
best_exp, best_metric_val, total_num_exps = best_space_records[GLOBAL_TUNING_SPACE]
if best_exp:
logger.info(
f"{best_exp['name']} is the optimal setup after tuning. The exp result is at {best_exp['result_dir']}."
)
else:
logger.info(f"No optimal setup is found. Please check that experiments were run successfully.")
tuning_duration = datetime.timedelta(seconds=(time.time() - self.start_time))
logger.info(f"Tuning completed in {tuning_duration}")
with open(os.path.join(self.results_dir, 'summary.txt'), 'a') as f:
f.write(
f"\n\nTuning completed in {tuning_duration}. Total number of experiments: {self.rm.experiment_count - 1}."
)
f.flush()
def _get_user_config(self, user_args):
"""Get DeepSpeed configuration from the user arguments passed to the launcher.
Args:
user_args ([list]): user arguments passed to the DeepSpeed launcher
Returns:
[dict]: DeepSpeed configuration dictionary
"""
user_config_file = None
if "--deepspeed_config" in user_args:
idx = user_args.index("--deepspeed_config")
assert ".json" in user_args[
idx + 1], "DeepSpeed --deepspeed_config requires a json file to specify the configuration"
user_config_file = user_args[idx + 1]
elif "--deepspeed" in user_args:
idx = user_args.index("--deepspeed")
if ".json" in user_args[idx + 1]:
user_config_file = user_args[idx + 1]
logger.debug(f"user_config_file = {user_config_file}")
if user_config_file is not None:
assert os.path.isfile(user_config_file), "DeepSpeed configuration file: {} is not an existing file".format(
user_config_file)
if os.path.exists(user_config_file):
return json.load(open(user_config_file, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
return None
def _get_resource_manager(self, active_resources):
"""Initialize and return a resource manager
Args:
active_resources ([dict]): A dictionary of hostname and its slots (GPUs), e.g. {"worker-0": "0,1,2,3,4,5,6,7,8"}
Raises:
RuntimeError: raises the error if no GPU is available
Returns:
[ResourceManager]: A resource manager that schedules and runs autotuning experiments.
"""
logger.info(f"active_resources = {active_resources}")
hosts = []
ngpus_per_node = 100
for hostname, slots in active_resources.items():
hosts.append(hostname)
ngpus_per_node = min(len(slots), ngpus_per_node)
assert ngpus_per_node > 0, "no gpu is available"
return ResourceManager(args=self.args,
hosts=hosts,
num_gpus_per_node=ngpus_per_node,
results_dir=self.results_dir,
exps_dir=self.exps_dir,
arg_mappings=self.autotuning_config.arg_mappings)
def _get_exp_resources(self, args):
"""Get resource requirement for each autotuning experiment
Args:
args (dict): user args
Returns:
num_nodes, num_gpus: the number of gpus and number of nodes used in the autotuning experiments
"""
if args.num_nodes > 0:
num_nodes = args.num_nodes
else:
num_nodes = len(self.rm.nodes)
if args.num_gpus > 0:
num_gpus = args.num_gpus
else:
num_gpus = self.rm.num_gpus_per_node
return num_nodes, num_gpus
def metric(self):
return self.autotuning_config.metric
def fast_enabled(self):
return self.autotuning_config.fast
def max_train_batch_size(self):
return self.autotuning_config.max_train_batch_size
def mp_size(self):
return self.autotuning_config.mp_size
def max_train_micro_batch_size_per_gpu(self):
if self.max_train_batch_size(
) and self.max_train_batch_size() > 0: # if the user specifies a max_train_batch_size
max_train_micro_batch_size = self.max_train_batch_size() * self.mp_size() // (
self.exp_num_gpus * self.exp_num_nodes) # gradient accumulation steps >=1
return min(self.autotuning_config.max_train_micro_batch_size_per_gpu, max_train_micro_batch_size)
else:
return self.autotuning_config.max_train_micro_batch_size_per_gpu
def min_train_micro_batch_size_per_gpu(self):
return self.autotuning_config.min_train_micro_batch_size_per_gpu
def num_tuning_micro_batch_sizes(self):
return self.autotuning_config.num_tuning_micro_batch_sizes
def fp16_enabled(self):
if FP16 in self.user_config.keys():
return self.user_config[FP16].get(FP16_ENABLED, FP16_ENABLED_DEFAULT)
else:
return False
def get_gpu_memory_info(self):
return get_accelerator().total_memory()
def get_activation_memory_per_gpu(self):
if self.model_info and "activation_mem_per_gpu" in self.model_info:
return self.model_info["activation_mem_per_gpu"]
def get_instantiation_memory_required_per_gpu(self, zero_stage):
num_params = self.get_model_num_params()
total_gpus = self.exp_num_nodes * self.exp_num_gpus
fp16_enabled = self.fp16_enabled()
if not num_params:
return 0
# assume the model uses Adam optimizer
# ZeroStageEnum.disabled:
params_mem = num_params * (2 if fp16_enabled else 4)
gradients_mem = num_params * (2 if fp16_enabled else 4)
optimizer_mem = num_params * (16 if fp16_enabled else 8)
if zero_stage >= ZeroStageEnum.optimizer_states:
optimizer_mem = optimizer_mem / total_gpus
if zero_stage >= ZeroStageEnum.gradients:
gradients_mem = gradients_mem / total_gpus
if zero_stage >= ZeroStageEnum.weights:
params_mem = params_mem / total_gpus
mem_per_gpu = (params_mem + gradients_mem + optimizer_mem) / self.mp_size()
return mem_per_gpu
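    # Worked example of the estimate above (a sketch, assuming fp16 with Adam, 1.5e9 parameters,
    # 16 total GPUs and mp_size = 1):
    #   ZeRO-0: (2 + 2 + 16) * 1.5e9           = 30.0e9 bytes per GPU
    #   ZeRO-1: (2 + 2 + 16/16) * 1.5e9        =  7.5e9 bytes per GPU
    #   ZeRO-2: (2 + 2/16 + 16/16) * 1.5e9     ~  4.7e9 bytes per GPU
    #   ZeRO-3: (2/16 + 2/16 + 16/16) * 1.5e9  ~  1.9e9 bytes per GPU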
def _generate_experiments(self, tuning_space, max_train_batch_size_per_gpu):
"""Generates a list of autotuning experiments given a tuning_space.
The corresponding parameter values are replaced by user-defined values in the DeepSpeed configuration file.
Args:
tuning_space ([dict]): A DeepSpeed configuration dictionary where a value can be a list (called a tuning parameter). For example,
{
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": [5e7,
5e8,
1e9],
"allgather_bucket_size": [5e7,
5e8,
1e9],
}
}
reduce_bucket_size and allgather_bucket_size are the tuning parameters in this tuning space.
Returns:
[list]: a list of experiments generated by taking combinations of values of the tuning space. The above tuning space generates 3*3 = 9 experiments if the user DeepSpeed configuration file does not overwrite the two tuning parameters or define more tuning parameters.
"""
exps = []
# each zero stage uses a different template configuration file
config_zero = tuning_space.get(ZERO_OPTIMIZATION, {})
stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, ZERO_OPTIMIZATION_STAGE_DEFAULT)
template_config = {}
if stage == 0:
template_path = DEFAULT_TEMPLATE_PATH_ZERO_0
template_config = hjson.load(open(template_path, 'r'))
prefix = "z0_"
elif stage == 1:
template_path = DEFAULT_TEMPLATE_PATH_ZERO_1
template_config = hjson.load(open(template_path, 'r'))
prefix = "z1_"
elif stage == 2:
template_path = DEFAULT_TEMPLATE_PATH_ZERO_2
template_config = hjson.load(open(template_path, 'r'))
prefix = "z2_"
elif stage == 3:
template_path = DEFAULT_TEMPLATE_PATH_ZERO_3
template_config = hjson.load(open(template_path, 'r'))
model_info = self.model_info
if model_info and "hidden_size" in model_info:
hs = model_info["hidden_size"]
template_config[ZERO_OPTIMIZATION]['reduce_bucket_size'] = hs * hs
template_config[ZERO_OPTIMIZATION]['stage3_prefetch_bucket_size'] = 0.9 * hs * hs
template_config[ZERO_OPTIMIZATION]['stage3_param_persistence_threshold'] = 10 * hs
prefix = "z3_"
else:
return exps
# replace the corresponding parameter values if the user specifies them in the DeepSpeed configuration file
replace_dict(tuning_space, self.user_config, [ZERO_OPTIMIZATION, TRAIN_MICRO_BATCH_SIZE_PER_GPU])
logger.debug(f"tuning_space = {json.dumps(tuning_space)}")
all_configs = get_all_configs(tuning_space, ignore_keys=["optimizer"])
tuning_keys = get_tuning_keys(tuning_space)
logger.debug(f"tuning_keys = {tuning_keys}")
logger.debug(f"before pruning total configs = {len(all_configs)}")
pruned_list = prune_configs(all_configs)
logger.debug(f"after pruning total configs = {len(pruned_list)}")
for config in pruned_list:
exp_config = copy.deepcopy(template_config)
# fill the template with the expr config
replace_dict(exp_config, config)
# if the config does not use offloading, remove the offloading section
config_zero = config.get(ZERO_OPTIMIZATION, None)
if config_zero:
if OFFLOAD_OPTIMIZER not in config_zero and OFFLOAD_OPTIMIZER in exp_config[ZERO_OPTIMIZATION]:
del exp_config[ZERO_OPTIMIZATION][OFFLOAD_OPTIMIZER]
if OFFLOAD_PARAM not in config_zero and OFFLOAD_PARAM in exp_config[ZERO_OPTIMIZATION]:
del exp_config[ZERO_OPTIMIZATION][OFFLOAD_PARAM]
# set gradient accumulation steps according to max_train_batch_size_per_gpu
mbs = exp_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU]
gas = max_train_batch_size_per_gpu // mbs
exp_config[GRADIENT_ACCUMULATION_STEPS] = gas
exp_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp = {}
# generate the expr name
exp_name = canonical_name(exp_config, tuning_keys, prefix)
exp['name'] = exp_name
exp[DS_CONFIG] = exp_config
exp['num_gpus'] = self.exp_num_gpus
exp['num_nodes'] = self.exp_num_nodes
exps.append(exp)
return exps
def tune(self):
""" Tunes Zero stages, micro batch size per GPU, and other Zero configurations. Performance metrics of different tuning spaces are recorded in self.records.
"""
if has_mlflow:
self.mlflow_parent_id = os.environ['MLFLOW_RUN_ID']
mlflow.start_run(run_id=self.mlflow_parent_id)
self.start_time = time.time()
if self.fast_enabled():
logger.info(f"Fast mode is enabled. Tuning micro batch size only.")
# model info profile run with DEFAULT_MIN_MEM_CONFIG
model_info = self.model_info_profile_run()
if model_info:
self.model_info = model_info
else:
return
logger.info(f"The model has {number_to_string(self.get_model_num_params())} parameters.")
self.gpu_mem = self.get_gpu_memory_info()
logger.info(f"Memory per GPU in the system is {memory_to_string(self.gpu_mem, postfix='B')}.")
self.activation_mem = self.get_activation_memory_per_gpu()
logger.info(
f"The model requires at least {memory_to_string(self.activation_mem, postfix='B')} activation memory for micro batch size 1."
)
#TODO: FIX THIS
stage = self.user_config.get(ZERO_OPTIMIZATION, {}).get(ZERO_OPTIMIZATION_STAGE, "all")
user_zero_stages = [stage] if not isinstance(stage, list) else stage
logger.info(f"User-defined zero stages are {stage}.")
mbs = 0
max_mbs = 0
metric_val = 0
required_gpu_mem = self.get_instantiation_memory_required_per_gpu(ZeroStageEnum.disabled) + self.activation_mem
if self.gpu_mem > required_gpu_mem:
if "all" in user_zero_stages or ZeroStageEnum.disabled in user_zero_stages:
logger.info(
f"The model might be runable with ZERO 0 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1), adding DEFAULT_TUNING_SPACE_ZERO_0 to the global tuning space"
)
next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_0)
if next_mbs > mbs:
mbs = next_mbs
max_mbs = next_max_mbs
metric_val = next_metric_val
if has_mlflow:
mlflow.log_metric(f"z0{self.metric()}", next_metric_val)
else:
logger.info(
f"The model is not runable with ZERO stage {ZeroStageEnum.disabled} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)"
)
required_gpu_mem = self.get_instantiation_memory_required_per_gpu(
ZeroStageEnum.optimizer_states) + self.activation_mem
if self.gpu_mem > required_gpu_mem:
if "all" in user_zero_stages or ZeroStageEnum.optimizer_states in user_zero_stages:
logger.info(
f"The model might be runable with ZERO 1 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_1 to the global tuning space"
)
next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_1,
prev_max_mbs=max_mbs,
prev_best_mbs=mbs,
prev_best_metric_val=metric_val)
if next_mbs > mbs:
mbs = next_mbs
max_mbs = next_max_mbs
metric_val = next_metric_val
if has_mlflow:
mlflow.log_metric(f"z1{self.metric()}", next_metric_val)
else:
logger.info(
f"The model is not runable with ZERO stage {ZeroStageEnum.optimizer_states} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)"
)
required_gpu_mem = self.get_instantiation_memory_required_per_gpu(
ZeroStageEnum.gradients) + self.activation_mem
if self.gpu_mem > required_gpu_mem:
if "all" in user_zero_stages or ZeroStageEnum.gradients in user_zero_stages:
logger.info(
f"The model might be runable with ZERO 2 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_2 to the global tuning space"
)
next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_2,
prev_max_mbs=max_mbs,
prev_best_mbs=mbs,
prev_best_metric_val=metric_val)
if next_mbs > mbs:
mbs = next_mbs
max_mbs = next_max_mbs
metric_val = next_metric_val
if has_mlflow:
mlflow.log_metric(f"z2{self.metric()}", next_metric_val)
else:
logger.info(
f"The model is not runable with ZERO stage {ZeroStageEnum.gradients} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)"
)
required_gpu_mem = self.get_instantiation_memory_required_per_gpu(ZeroStageEnum.weights) + self.activation_mem
if self.gpu_mem > required_gpu_mem:
if "all" in user_zero_stages or ZeroStageEnum.weights in user_zero_stages:
logger.info(
f"The model might be runable with ZERO 3 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_3 to the global tuning space"
)
_, _, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_3,
prev_max_mbs=max_mbs,
prev_best_mbs=mbs,
prev_best_metric_val=metric_val)
if has_mlflow:
mlflow.log_metric(f"z3{self.metric()}", next_metric_val)
else:
logger.info(
f"The model has {self.get_model_num_params()} parameters and requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory per GPU with DeepSpeed Zero stage {ZeroStageEnum.weights} optimization. Memory per GPU in system is {memory_to_string(self.gpu_mem)}. No tuning is performed."
)
return
if has_mlflow:
mlflow.end_run()
def tune_space(self, tuning_space, prev_max_mbs=0, prev_best_mbs=0, prev_best_metric_val=0):
config_zero = tuning_space.get(ZERO_OPTIMIZATION, {})
stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, None)
tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage)
tuning_micro_batch_sizes = []
max_train_batch_size_per_gpu = 0
tuning_micro_batch_sizes_overwritten = False
# calculate max micro batch size using gpu memory, model instantiation memory and activation memory
# calculated_max_micro_batch_size = (memory_per_gpu - instantiation_memory) // activation_memory_micro_batch_size_1
calculated_max_micro_batch_size = int(
self.gpu_mem - self.get_instantiation_memory_required_per_gpu(stage)) // self.activation_mem
logger.info(
f"Start tuning for space {tuning_space_name}, calculated_max_micro_batch_size = {calculated_max_micro_batch_size}"
)
if calculated_max_micro_batch_size < prev_max_mbs:
logger.info(f"No need to tune Zero stage {stage}. End tuning for space {tuning_space_name}")
return 0, 0, 0
if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self.user_config and isinstance(
self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU], list):
# user-specified micro batch size per gpu is a list which overwrites the default tuning behavior
tuning_micro_batch_sizes = [
s for s in self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] if isinstance(s, int)
]
gas = self.get_gas_from_user_config()
min_micro_batch_size = min(tuning_micro_batch_sizes)
max_micro_batch_size = max(tuning_micro_batch_sizes)
max_train_batch_size_per_gpu = max_micro_batch_size * gas
tuning_micro_batch_sizes_overwritten = True
else:
# auto-detects the list of micro batch sizes to tune
min_micro_batch_size, max_micro_batch_size = self.get_min_max_micro_batch_size(
stage, prev_max_mbs, calculated_max_micro_batch_size)
if max_micro_batch_size < prev_max_mbs:
logger.info(f"No need to tune Zero stage {stage}. End tuning for space {tuning_space_name}")
return 0, 0, 0
tuning_micro_batch_sizes, max_train_batch_size_per_gpu = self.get_tuning_micro_batch_size_list(
min_micro_batch_size,
max_micro_batch_size,
num_tuning_micro_batch_sizes=self.num_tuning_micro_batch_sizes())
logger.info(
f"tuning_micro_batch_sizes = {tuning_micro_batch_sizes}, max_train_batch_size_per_gpu = {max_train_batch_size_per_gpu}"
)
# return if the tuning_micro_batch_sizes list is empty
if not tuning_micro_batch_sizes:
logger.info(f"End tuning for space {tuning_space_name}")
return 0, 0, 0
# tune micro batch sizes and gradient accumulation steps given max_train_batch_size_per_gpu
tuning_micro_batch_sizes = self.run_tuning_micro_batch_sizes(tuning_micro_batch_sizes,
max_train_batch_size_per_gpu,
min_micro_batch_size, stage,
tuning_micro_batch_sizes_overwritten)
fast_best_record = self.get_best_space_record(tuning_space_name)
fast_best_metric_val = fast_best_record[1] if fast_best_record else 0
fast_best_mbs = fast_best_record[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] if fast_best_record else 0
logger.info(f"fast_best_mbs = {fast_best_mbs}, name = {fast_best_record[0]['name']}")
if self.fast_enabled() or stage == 0:
logger.info(f"End tuning for space: {tuning_space_name}")
return max_micro_batch_size, fast_best_mbs, fast_best_metric_val
        # If the best metric (or its micro batch size) in the current ZeRO stage is no better than the
        # corresponding value from the previous ZeRO stage, stop here and do not tune the other ZeRO
        # configuration parameters for this stage.
if stage > 0:
if fast_best_mbs <= prev_best_mbs or fast_best_metric_val < prev_best_metric_val:
logger.info(
f"End tuning for space: {tuning_space_name}. No need to tune other Zero configuration parameters.")
return max_micro_batch_size, fast_best_mbs, fast_best_metric_val
tuning_space[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = tuning_micro_batch_sizes
tuning_space_name = canonical_name(tuning_space,
tuning_keys=get_tuning_keys(tuning_space),
prefix="z" + str(stage) + "_",
omit_val=True)
logger.info(f'Tuning space is {tuning_space}')
logger.info(f'Tuning space name is {tuning_space_name}')
exps = self._generate_experiments(tuning_space, max_train_batch_size_per_gpu)
logger.info(f'Tuner type is {self.autotuning_config.tuner_type}')
if self.autotuning_config.tuner_type == AUTOTUNING_TUNER_MODELBASED:
t = ModelBasedTuner(exps, self.rm, self.metric(), tuning_space)
elif self.autotuning_config.tuner_type == AUTOTUNING_TUNER_RANDOM:
t = RandomTuner(exps, self.rm, self.metric())
else:
t = GridSearchTuner(exps, self.rm, self.metric())
sample_size = len(self.rm.nodes) * self.rm.num_gpus_per_node // (self.exp_num_gpus * self.exp_num_nodes)
num_exps = t.tune(sample_size=sample_size,
n_trials=self.autotuning_config.tuner_num_trials,
early_stopping=self.autotuning_config.tuner_early_stopping)
exp = t.best_exp
metric_val = t.best_metric_val
if exp:
self.update_records(tuning_space_name, exp, metric_val, num_exps)
full_best_record = self.get_best_space_record(tuning_space_name)
full_best_metric_val = full_best_record[1] if full_best_record else -1
if full_best_metric_val > fast_best_metric_val:
best_metric_val = full_best_metric_val
best_mbs = full_best_record[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] if full_best_record else -1
else:
best_metric_val = fast_best_metric_val
best_mbs = fast_best_mbs
logger.info(f"End tuning for space: {tuning_space_name}")
return max_micro_batch_size, best_mbs, best_metric_val
def get_plauteu_mbs(self, tuning_space_name):
if tuning_space_name not in self.records:
return 0
space_records = self.records[tuning_space_name]
sorted_space_records = sorted(space_records, key=lambda x: x[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU])
prev_metric_val = None
prev_micro_batch_size = 0
for (exp, metric_val, _) in sorted_space_records:
if prev_metric_val:
if metric_val < prev_metric_val:
break
if (metric_val >= prev_metric_val
and (metric_val - prev_metric_val) / prev_metric_val < METRIC_PERCENT_DIFF_CONST):
break
prev_metric_val = metric_val
prev_micro_batch_size = exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]
plateau_mbs = prev_micro_batch_size
return plateau_mbs
def get_model_num_params(self):
if self.model_info and "num_params" in self.model_info:
return self.model_info["num_params"]
def model_info_profile_run(self):
"""Does a model information profiling experiment that collects the number of model parameters and activation memory.\
The experiment produces a "profile_model_info" folder under self.results_dir.
Returns:
[dict]: a model information dictionary, e.g., {"num_params": 335144976, "trainable_num_params": 335144976, "activation_mem_per_gpu": 324358144, "rank": 0}
"""
logger.info("Starting model info profile run.")
model_info = self.autotuning_config.model_info
if model_info and MODEL_INFO_NUM_PARAMS in model_info:
return model_info
ds_config = copy.deepcopy(self.user_config)
replace_dict(ds_config, DEFAULT_MIN_MEM_CONFIG)
model_info_path = os.path.join(self.results_dir, "profile_model_info", "model_info.json")
ds_config[AUTOTUNING] = {"enabled": True, "model_info_path": model_info_path, "model_info": {"profile": True}}
exp_config = {}
exp_name = "profile_model_info"
exp_config['name'] = exp_name
exp_config[DS_CONFIG] = ds_config
exp_config['num_gpus'] = self.exp_num_gpus
exp_config['num_nodes'] = self.exp_num_nodes
exp_path = os.path.join(self.exps_dir, f'{exp_name}.json')
with open(exp_path, 'w', buffering=BUFSIZE) as fd:
json.dump(exp_config, fd)
fd.flush()
os.fsync(fd)
self.rm.schedule_experiments([exp_path])
self.rm.run()
for exp_id, (exp_json, err) in self.rm.finished_experiments.items():
self.rm.clear()
if err:
logger.error(f"The model is not runnable with DeepSpeed with error = {err}")
return None
if os.path.exists(model_info_path):
with open(model_info_path, 'r') as f:
model_info = hjson.load(f)
return model_info
def update_records(self, space_name, exp, metric_val, num_exps):
if space_name not in self.records:
self.records[space_name] = [(exp, metric_val, num_exps)]
else:
self.records[space_name].append((exp, metric_val, num_exps))
def get_best_space_record(self, space_name):
if space_name not in self.records:
return None
space_records = self.records[space_name]
best_space_record = None
space_num_exps = 0
for (exp, metric_val, num_exps) in space_records:
space_num_exps += num_exps
if best_space_record is None or metric_val > best_space_record[1]:
best_space_record = (exp, metric_val)
if best_space_record:
best_space_record = best_space_record + (space_num_exps, )
return best_space_record
def get_best_space_records(self):
best_space_records = {}
global_best_record = None
for space_name, space_records in self.records.items():
best_space_record = self.get_best_space_record(space_name)
if best_space_record:
best_space_records[space_name] = best_space_record
if not global_best_record or best_space_record[1] > global_best_record[1]:
global_best_record = best_space_record
if global_best_record:
best_space_records[GLOBAL_TUNING_SPACE] = global_best_record
return best_space_records
def run_tuning_micro_batch_sizes(self, tuning_micro_batch_sizes, max_train_batch_size_per_gpu,
min_micro_batch_size, stage, tuning_micro_batch_sizes_overwritten):
assert tuning_micro_batch_sizes, "the tuning micro batch size list is empty"
tuning_micro_batch_sizes.sort()
max_micro_batch_size = tuning_micro_batch_sizes[-1]
max_micro_batch_size_metric_val = 0
ds_config = get_first_config(self.user_config)
ds_config[ZERO_OPTIMIZATION] = {ZERO_OPTIMIZATION_STAGE: stage}
tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage)
exp_paths = []
for mbs in tuning_micro_batch_sizes:
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
gas = max_train_batch_size_per_gpu // mbs
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp_config = {}
exp_config['name'] = exp_name
exp_config[DS_CONFIG] = ds_config
exp_config['num_gpus'] = self.exp_num_gpus
exp_config['num_nodes'] = self.exp_num_nodes
exp_path = os.path.join(self.exps_dir, f'{exp_name}.json')
with open(exp_path, 'w', buffering=BUFSIZE) as fd:
json.dump(exp_config, fd)
fd.flush()
os.fsync(fd)
exp_paths.append(exp_path)
self.rm.schedule_experiments(exp_paths)
self.rm.run()
for exp_id, (exp, err) in self.rm.finished_experiments.items():
if exp:
metric_file = exp[DS_CONFIG][AUTOTUNING][AUTOTUNING_METRIC_PATH]
if os.path.exists(metric_file):
with open(metric_file, 'r') as f:
results = hjson.load(f)
metric_val = results[self.metric()]
self.update_records(tuning_space_name, exp, metric_val, 1)
if max_micro_batch_size == exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]:
max_micro_batch_size_metric_val = metric_val
if has_mlflow:
os.environ.pop('MLFLOW_RUN_ID')
mlflow.start_run(nested=True, run_name=exp['name'])
for metric in results:
mlflow.log_metric(metric, results[metric])
mlflow.end_run()
os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id
else:
self.update_records(tuning_space_name, exp, 0, 1)
else:
mbs = exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]
logger.info(f"micro batch size = {mbs} was not run successfully")
self.rm.clear()
if tuning_micro_batch_sizes_overwritten:
return tuning_micro_batch_sizes
        # In an auto-detected tuning_micro_batch_sizes list, max_micro_batch_size might not be performant
        # because its memory consumption is close to the maximum. Try smaller values while gas stays the
        # same; if a more performant mbs value is found, use it to replace max_micro_batch_size in the list.
min_micro_batch_size_with_same_gas = (tuning_micro_batch_sizes[-2] +
1) if len(tuning_micro_batch_sizes) > 1 else min_micro_batch_size
prev_best_metric_val = max_micro_batch_size_metric_val
prev_best_mbs = max_micro_batch_size
stride = (max_micro_batch_size - min_micro_batch_size_with_same_gas) // 3
if stride == 0:
stride = 1
for mbs in reversed(range(min_micro_batch_size_with_same_gas, max_micro_batch_size, stride)):
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
gas = max_train_batch_size_per_gpu // mbs
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
            if metric_val:
                metric_file = exp[DS_CONFIG][AUTOTUNING][AUTOTUNING_METRIC_PATH]
                with open(metric_file, 'r') as f:
results = hjson.load(f)
metric_val = results[self.metric()]
if has_mlflow:
os.environ.pop('MLFLOW_RUN_ID')
mlflow.start_run(nested=True, run_name=exp_name)
for metric in results:
mlflow.log_metric(metric, results[metric])
mlflow.end_run()
os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id
self.update_records(tuning_space_name, exp, metric_val, 1)
if metric_val > prev_best_metric_val * (1 + METRIC_PERCENT_DIFF_CONST):
prev_best_metric_val = metric_val
prev_best_mbs = mbs
else:
break
else:
self.update_records(tuning_space_name, exp, 0, 1)
break
if prev_best_mbs != max_micro_batch_size:
tuning_micro_batch_sizes[-1] = prev_best_mbs
return tuning_micro_batch_sizes
def get_min_max_micro_batch_size(self, stage, min_micro_batch_size, calculated_max_micro_batch_size):
# get min and max micro batch size with gradient accumulation steps = 1
if min_micro_batch_size > calculated_max_micro_batch_size:
return -1, -1
used_micro_batch_sizes = []
tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage)
ds_config = get_first_config(self.user_config)
ds_config[ZERO_OPTIMIZATION] = {ZERO_OPTIMIZATION_STAGE: stage}
gas = self.get_gas_from_user_config()
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
# search for the min micro batch size
if min_micro_batch_size < 1:
if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self.user_config and isinstance(
self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU], int):
# user specifies train_micro_batch_size_per_gpu as an int
mbs = int(self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU])
else:
# user does not specify train_micro_batch_size_per_gpu or sets it to "auto" when using Hugging Face
val = self.get_val_from_user_args(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
if val:
mbs = int(val)
else:
mbs = 1
assert mbs > 0, "The micro batch size per GPU must be greater than 0."
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if metric_val:
self.update_records(tuning_space_name, exp, metric_val, 1)
used_micro_batch_sizes.append(mbs)
min_micro_batch_size = mbs
else:
self.update_records(tuning_space_name, exp, 0, 1)
logger.info(f"User-specified micro batch size per GPU {mbs} does not run")
if self.min_train_micro_batch_size_per_gpu() == mbs:
return -1, -1
mbs = self.min_train_micro_batch_size_per_gpu()
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if not metric_val:
self.update_records(tuning_space_name, exp, 0, 1)
logger.info(f"min_train_micro_batch_size_per_gpu {mbs} is not runnable.")
return -1, -1
self.update_records(tuning_space_name, exp, metric_val, 1)
min_micro_batch_size = mbs
used_micro_batch_sizes.append(mbs)
else:
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = min_micro_batch_size
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = min_micro_batch_size * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(min_micro_batch_size)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if metric_val:
self.update_records(tuning_space_name, exp, metric_val, 1)
used_micro_batch_sizes.append(min_micro_batch_size)
else:
self.update_records(tuning_space_name, exp, 0, 1)
return -1, -1
# search for the max micro batch size
max_micro_batch_size = min(calculated_max_micro_batch_size, self.max_train_micro_batch_size_per_gpu())
for mbs in [math.ceil(1.05 * max_micro_batch_size), max_micro_batch_size, int(0.95 * max_micro_batch_size)]:
if mbs > self.max_train_micro_batch_size_per_gpu():
continue
if mbs in used_micro_batch_sizes:
return min_micro_batch_size, mbs
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if metric_val:
logger.info(f"mbs = {mbs} is found as max mbs")
self.update_records(tuning_space_name, exp, metric_val, 1)
used_micro_batch_sizes.append(mbs)
return min_micro_batch_size, mbs
else:
self.update_records(tuning_space_name, exp, 0, 1)
space_records = self.records[tuning_space_name] if tuning_space_name in self.records else []
if space_records:
prev_idx = min(range(len(space_records)),
key=lambda i: abs(space_records[i][0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] -
min_micro_batch_size))
prev_metric_val = space_records[prev_idx][1]
else:
prev_metric_val = None
low = min_micro_batch_size
high = max_micro_batch_size
        # Binary search for the largest runnable micro batch size; when the loop exits, low is the
        # smallest micro batch size that fails to run (e.g. OOMs) and max_micro_batch_size = low - 1.
while low <= high:
mid = int((low + high) // 2)
logger.debug(f"trying mbs = {mid}, low = {low}, high = {high}")
if mid not in used_micro_batch_sizes:
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mid
ds_config[TRAIN_BATCH_SIZE] = mid * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mid)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if metric_val:
low = mid + 1
self.update_records(tuning_space_name, exp, metric_val, 1)
used_micro_batch_sizes.append(mid)
if prev_metric_val and (
(metric_val - prev_metric_val) / prev_metric_val) < METRIC_PERCENT_DIFF_CONST:
logger.info(f"performance plateaus at mbs = {low}")
break
prev_metric_val = metric_val
else:
self.update_records(tuning_space_name, exp, 0, 1)
high = mid - 1
else:
low = mid + 1
max_micro_batch_size = low - 1
logger.info(f"min_micro_batch_size = {min_micro_batch_size}, max_micro_batch_size = {max_micro_batch_size}.")
return min_micro_batch_size, max_micro_batch_size
def get_gas_from_user_config(self):
gas = 1
if GRADIENT_ACCUMULATION_STEPS in self.user_config:
gas_in_config = self.user_config[GRADIENT_ACCUMULATION_STEPS]
if isinstance(gas_in_config, int):
gas = gas_in_config
elif gas_in_config == "auto": # GRADIENT_ACCUMULATION_STEPS: "auto"
val = self.get_val_from_config(GRADIENT_ACCUMULATION_STEPS)
if val:
gas = int(val)
elif isinstance(gas_in_config, list):
logger.info(
f"Specifying a list of {GRADIENT_ACCUMULATION_STEPS} to tune is not supported. 1 would be used.")
assert gas > 0, "Gradient accumulation steps must be positive."
return gas
def get_val_from_user_args(self, ds_name):
arg_mappings = self.autotuning_config.arg_mappings
user_args = self.args.user_args
if arg_mappings and ds_name in arg_mappings:
arg_name = arg_mappings[ds_name]
if arg_name in user_args:
idx = user_args.index(arg_name)
if user_args[idx + 1].isnumeric():
return (user_args[idx + 1])
return None
def get_tuning_micro_batch_size_list(self, min_micro_batch_size, max_micro_batch_size,
num_tuning_micro_batch_sizes):
"""Get a list of micro batch sizes to tune based on min and max values, as well as the size of the list.
Args:
min_micro_batch_size ([int]): min micro batch size per GPU
max_micro_batch_size ([int]): max micro batch size per GPU
num_tuning_micro_batch_sizes (int): the number of items in the returned list
Returns:
[list]: a list of micro batch sizes to tune.
"""
if min_micro_batch_size <= 0 or max_micro_batch_size <= 0:
logger.info(
f"min_micro_batch_size = {min_micro_batch_size}, max_micro_batch_size = {max_micro_batch_size}")
return [], 0
# NUM_GPUS=$(( ${NUM_WORKERS} * ${NUM_GPUS_PER_WORKER} ))
# DP_SIZE=$(( ${NUM_GPUS} / (${PP_SIZE} * ${MP_SIZE}) ))
# GRAD_ACC_STEPS=$(( ${TARGET_GLOBAL_BATCH_SIZE} / (${BATCH_SIZE} * ${DP_SIZE}) ))
if self.max_train_batch_size(
) and self.max_train_batch_size() > 0: # if the user specifies a max_train_batch_size
max_train_batch_size_per_gpu = self.max_train_batch_size() * self.mp_size() // (self.exp_num_gpus *
self.exp_num_nodes)
else:
gas = self.get_gas_from_user_config()
max_train_batch_size_per_gpu = max_micro_batch_size * gas // self.mp_size()
logger.info(f"max_train_batch_size_per_gpu = {max_train_batch_size_per_gpu}")
if min_micro_batch_size < max_micro_batch_size // 2:
min_micro_batch_size = max_micro_batch_size // 2
# constant stride
stride = (max_micro_batch_size - min_micro_batch_size) // num_tuning_micro_batch_sizes
if stride == 0:
stride = 1
ls = []
min_gas = max_train_batch_size_per_gpu // max_micro_batch_size
# if gas is the same as min_gas, do not add mbs to the tuning list
for mbs in range(min_micro_batch_size, max_micro_batch_size, stride):
if max_train_batch_size_per_gpu // mbs != min_gas:
ls.append(mbs)
ls.append(max_micro_batch_size)
return ls, max_train_batch_size_per_gpu
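    # Worked example (a sketch, assuming gas = 1, mp_size = 1 and no max_train_batch_size):
    # get_tuning_micro_batch_size_list(3, 16, 3) first lifts the min to 16 // 2 = 8, uses a stride of
    # (16 - 8) // 3 = 2, keeps only candidates whose gradient accumulation steps differ from
    # min_gas = 1, and returns ([8, 16], 16).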
def run_ds_config(self, ds_config, exp_name):
exp_config = {}
exp_config['name'] = exp_name
exp_config[DS_CONFIG] = ds_config
exp_config['num_gpus'] = self.exp_num_gpus
exp_config['num_nodes'] = self.exp_num_nodes
exp_path = os.path.join(self.exps_dir, f'{exp_name}.json')
logger.debug(f'run_ds_config exp_name = {exp_name}')
with open(exp_path, 'w', buffering=BUFSIZE) as fd:
json.dump(exp_config, fd)
fd.flush()
os.fsync(fd)
self.rm.schedule_experiments([exp_path])
self.rm.run()
exp, metric_val = self.rm.parse_results(self.metric())
self.rm.clear()
return exp, metric_val
def write_optimal_config(self):
best_space_records = self.get_best_space_records()
if GLOBAL_TUNING_SPACE not in best_space_records:
return
best_exp, best_metric_val, _ = best_space_records[GLOBAL_TUNING_SPACE]
if best_exp:
exp_dir = best_exp["result_dir"]
cmd = None
with open(os.path.join(exp_dir, "cmd.txt"), "r") as f:
cmd = [str(i) for i in f.read().split()]
ds_config = hjson.load(open(os.path.join(exp_dir, "ds_config.json"), "r"))
ds_config.pop(AUTOTUNING)
ds_config_path = os.path.join(self.results_dir, "ds_config_optimal.json")
json.dump(ds_config, open(ds_config_path, "w"))
cmd_path = os.path.join(self.results_dir, "cmd_optimal.txt")
with open(cmd_path, "w") as fd:
fd.write(" ".join(cmd))
fd.write("\n")
fd.flush()
self.optimal_cmd = cmd
self.optimal_ds_config = ds_config
logger.info(
f"Wrote the optimal DeepSpeed configuration found by autotuning to {ds_config_path}, and the corresponding DeepSpeed command to {cmd_path}"
)
def run_after_tuning(self):
""" Launches the training with the optimal DeepSpeed configuration found through the autotuning process.
"ds_config_optimal.json" describing the optimal DeepSpeed configuration as well the command used to launch training "cmd_optimal.txt" are saved to self.results_dir.
"""
if self.optimal_cmd:
result = subprocess.Popen(self.optimal_cmd)
result.wait()
logger.info(f"Done running with the optimal DeepSpeed configuration using {self.optimal_cmd}")
else:
logger.info(f"No optimal DeepSpeed configuration found by autotuning.")
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
from deepspeed.autotuning.constants import *
from deepspeed.autotuning.utils import write_experiments
from deepspeed.utils import logger
class BaseTuner:
def __init__(self, exps, resource_manager, metric):
self.all_exps = exps
self.rm = resource_manager
self.best_iter = 0
self.best_exp = None
self.best_metric_val = None
self.metric = metric if metric else AUTOTUNING_METRIC_DEFAULT
logger.info(f"total number of exps = {len(self.all_exps)}")
    def has_next(self):
        """Whether there exist more configurations for evaluation"""
        return len(self.all_exps) > 0
def next_batch(self, sample_size):
"""Select the next batch of configurations for evaluation"""
raise NotImplementedError
def update(self):
""""Update the tuner with what configurations have been evaluated and their performance results"""
def tune(self, sample_size=1, n_trials=1000, early_stopping=None):
i = 0
try:
while i < n_trials and self.has_next():
                # Select the next batch of configurations for evaluation
sampled_exps = self.next_batch(sample_size)
# Generate experiments for measurement of performance
exp_paths = write_experiments(sampled_exps, self.rm.exps_dir)
self.rm.schedule_experiments(exp_paths)
self.rm.run()
exp, metric_val = self.rm.parse_results(self.metric)
                if self.best_exp is None or self.best_metric_val is None or (metric_val
                                                                             and metric_val > self.best_metric_val):
# logger.info(f"tuner finds better = {exp}")
self.best_exp = exp
self.best_metric_val = metric_val
self.best_iter = i
i += len(sampled_exps)
# Update the tuner with evaluated performance results
self.update()
self.rm.clear()
# Early stop if no more promising configurations are likely to be found
if early_stopping and i >= self.best_iter + early_stopping:
logger.info(
f"Tuner early stopped at iteration {i}. Best iteration is {self.best_iter}. Early stopping threshold is {early_stopping}"
)
break
return i
        except Exception:
            logger.info(f"Tuner Error: {sys.exc_info()[0]}")
return i
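# Example usage of a concrete tuner (a sketch; `exps` would come from
# Autotuner._generate_experiments and `rm` is a scheduler.ResourceManager):
#   tuner = GridSearchTuner(exps, rm, metric="throughput")  # "throughput" is an assumed metric name
#   num_evaluated = tuner.tune(sample_size=1, n_trials=50, early_stopping=5)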
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .index_based_tuner import RandomTuner, GridSearchTuner
# from .ga_tuner import GATuner
from .model_based_tuner import ModelBasedTuner
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import hjson
from ..constants import AUTOTUNING, AUTOTUNING_METRIC_PATH
from .base_tuner import BaseTuner
from .cost_model import XGBoostCostModel
from .utils import *
from ..utils import *
import numbers
from ..constants import AUTOTUNING_METRIC_LATENCY
INIT_NUM = 2
class ModelBasedTuner(BaseTuner):
"""Exploring the search space with a cost model"""
    def __init__(self, exps: list, resource_manager, metric, tuning_space):
super().__init__(exps, resource_manager, metric)
        self.tuning_space = tuning_space
self.best_iter = 0
self.all_configs = [e['ds_config'] for e in exps]
self.num_all_configs = len(self.all_configs)
self.dims = dict_to_dims(self.tuning_space)
logger.info(f"Create config dim: {self.dims}, all configs: {self.num_all_configs}")
self.visited = set([])
self.trials = []
self.trial_pt = 0
init_num = min(INIT_NUM, self.num_all_configs)
for _ in range(init_num):
            exp_feature = 0
while exp_feature in self.visited:
exp_feature = np.random.randint(self.num_all_configs)
self.trials.append(exp_feature)
self.visited.add(exp_feature)
self.cost_model = XGBoostCostModel("rank")
self.evaluated_configs = []
self.evaluated_perf = []
self.train_ct = 0
self.random_exploration_ratio = 0.2 # do random exploration
def find_estimated_top_configs(self):
"""Use the cost model to predict the estimated performance of configurations and find the top ones for the next round of evaluation"""
configs = []
for c in self.all_configs:
flattened_ds_config = flatten(c)
feature_val = []
for k, v in flattened_ds_config.items():
if isinstance(v, numbers.Number):
feature_val.append(v)
configs.append(feature_val)
# print(configs)
# TODO the current implementation requires that all configs have the same shape.
configs = np.array(configs, dtype=np.float32)
estimates = self.cost_model.predict(configs)
n = len(estimates)
top_idx = np.argsort(estimates)
top_idx_ret = top_idx if self.metric == AUTOTUNING_METRIC_LATENCY else top_idx[::-1][:n]
# top_configs = [self.all_configs[i] for i in top_idx]
return top_idx_ret
def next_batch(self, sample_size):
sampled_batch = []
counter = 0
while counter < sample_size:
if len(self.visited) >= self.num_all_configs:
break
            index = None
            while self.trial_pt < len(self.trials):
                logger.debug(f"trials: {self.trials}")
                # Select the most promising trial that has not been visited yet
                index = self.trials[self.trial_pt]
                if index not in self.visited:
                    break
                self.trial_pt += 1
            # To avoid over-exploitation, randomly select an index that has not been explored.
            rand = np.random.rand()
            if index is None or index in self.visited or rand < self.random_exploration_ratio:
                index = np.random.randint(self.num_all_configs)
                while index in self.visited:
                    index = np.random.randint(self.num_all_configs)
# Need to track both the sampled configs and indices
sampled_batch.append(self.all_exps[index])
self.visited.add(index)
counter += 1
return sampled_batch
def has_next(self):
return len(self.visited) < self.num_all_configs
def update(self):
for exp_id, (exp, err) in self.rm.finished_experiments.items():
feature_val = []
if err:
logger.info(
f"Skipping exp_id = {exp_id}, exp_name = {exp['name']}, the experiment did not run successfully with error = {err}, thus a metrics.txt does not exist for it. Please check the stderr.log in {exp['result_dir']}"
)
ds_config = exp["ds_config"]
flattened_ds_config = flatten(ds_config)
for k, v in flattened_ds_config.items():
if isinstance(v, numbers.Number):
feature_val.append(v)
self.evaluated_configs.append(feature_val)
self.evaluated_perf.append(0.0)
continue
p = exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH]
with open(p, 'r') as f:
results = hjson.load(f)
curr_iter = results[self.metric]
logger.debug(f"parsing the results for {exp_id}, Result is {curr_iter}")
ds_config = exp["ds_config"]
flattened_ds_config = flatten(ds_config)
for k, v in flattened_ds_config.items():
if isinstance(v, numbers.Number):
feature_val.append(v)
self.evaluated_configs.append(feature_val)
self.evaluated_perf.append(curr_iter)
logger.debug(f"**Evaluated configs: {len(self.evaluated_configs)}, evaluated perf: {self.evaluated_perf}")
self.cost_model.fit(self.evaluated_configs, self.evaluated_perf)
estimated_top_configs = self.find_estimated_top_configs()
self.trials = estimated_top_configs
self.trial_pt = 0
self.train_ct += 1
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import numpy as np
import itertools
from ..utils import *
import collections.abc
def index_to_feature(p, dims):
"""convert index form (single integer) to feature form (vector)"""
feature = []
for dim in dims:
feature.append(p % dim)
p //= dim
return feature
def feature_to_index(feature, dims):
"""convert feature form (vector) to index form (single integer)"""
p = 0
for j, k in enumerate(feature):
print("j:", "k:", k, "dims", dims[:j])
p += int(np.prod(dims[:j])) * k
return p
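# Worked example: with dims = [3, 4], index 7 maps to the feature vector
# [7 % 3, (7 // 3) % 4] = [1, 2], and feature_to_index([1, 2], [3, 4]) recovers
# 1 * 1 + 2 * 3 = 7.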
def dict_to_dims(tuning_space):
dims = []
for key, val in tuning_space.items():
if isinstance(val, dict):
dims.extend(dict_to_dims(val))
elif isinstance(val, list):
dims.append(len(val))
else:
dims.append(1)
return dims
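# Example: dict_to_dims({"a": [1, 2, 3], "b": {"c": [4, 5]}}) returns [3, 2], i.e. one
# dimension size per tunable list in the (possibly nested) tuning space.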
def gen_combinations(d: dict):
keys, values = d.keys(), d.values()
for v in values:
if not isinstance(v, list):
v = [v]
values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values)
for comb in itertools.product(*values_choices):
yield dict(zip(keys, comb))
def flatten(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
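# Example: flatten({"zero_optimization": {"stage": 3}}) returns {"zero_optimization_stage": 3}.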
def dict_to_feature(feature_dict, keys, max_value=None):
"""Extract values from dict"""
feature = []
for key, val in feature_dict.items(): # First level
if key not in keys:
continue
if val is None or val == "auto" or key == "autotuning" or val == "":
continue
if isinstance(val, dict):
            feature.append(dict_to_feature(val, keys, max_value))
else:
feature.append(float(val))
# normalization, should not matter in tree models
if max_value is not None:
norm_feature = []
for f, mv in zip(feature, max_value):
norm_feature.append(f / mv)
feature = norm_feature
return feature
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import *
try:
import xgboost as xgb
except ImportError:
xgb = None
class XGBoostCostModel():
def __init__(self, loss_type, num_threads=None, log_interval=25, upper_model=None):
        assert xgb is not None, "missing requirements, please install deepspeed with the 'autotuning_ml' extra."
self.loss_type = loss_type
if loss_type == "reg":
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.0,
"alpha": 0,
"objective": "reg:linear",
}
elif loss_type == "rank":
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.0,
"alpha": 0,
"objective": "rank:pairwise",
}
else:
raise RuntimeError("Invalid loss type: " + loss_type)
self.xgb_params["verbosity"] = 0
if num_threads:
self.xgb_params["nthread"] = num_threads
def fit(self, xs, ys):
x_train = np.array(xs, dtype=np.float32)
y_train = np.array(ys, dtype=np.float32)
y_max = np.max(y_train)
y_train = y_train / max(y_max, 1e-9)
index = np.random.permutation(len(x_train))
dtrain = xgb.DMatrix(x_train[index], y_train[index])
self.bst = xgb.train(self.xgb_params, dtrain)
def predict(self, xs):
features = xgb.DMatrix(xs)
return self.bst.predict(features)
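# Example usage (a sketch): fit the model on flattened numeric config vectors paired with their
# measured metric values, then rank unseen configurations.
#   model = XGBoostCostModel("rank")
#   model.fit([[1, 5e7], [1, 5e8]], [1200.0, 1350.0])
#   scores = model.predict(np.array([[1, 1e9]], dtype=np.float32))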
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import random
from .base_tuner import BaseTuner
class RandomTuner(BaseTuner):
"""Explore the search space in random order"""
def __init__(self, exps: list, resource_manager, metric):
super().__init__(exps, resource_manager, metric)
def next_batch(self, sample_size=1):
if sample_size > len(self.all_exps):
sample_size = len(self.all_exps)
sampled_batch = random.sample(self.all_exps, sample_size)
self.all_exps = [x for x in self.all_exps if x not in sampled_batch]
return sampled_batch
class GridSearchTuner(BaseTuner):
"""Explore the search space in sequential order"""
def __init__(self, exps: list, resource_manager, metric):
super().__init__(exps, resource_manager, metric)
def next_batch(self, sample_size=1):
if sample_size > len(self.all_exps):
sample_size = len(self.all_exps)
sampled_batch = self.all_exps[0:sample_size]
self.all_exps = [x for x in self.all_exps if x not in sampled_batch]
return sampled_batch
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
The file has been adapted from two fairscale files:
(1) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/moe_layer.py
(2) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/top2gate.py
Git commit hash: 34df606902a240567a0d898037ece55c2f1336cf
We retain the following license from the original files:
"""
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.utils import logger
from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
import torch.nn.functional as F
from deepspeed.utils import groups
from .mappings import drop_tokens, gather_tokens
if TYPE_CHECKING:
Base = Module[Tensor]
else:
Base = Module
uniform_map: Dict[torch.device, Callable] = {}
gumbel_map: Dict[torch.device, Callable] = {}
exp_selection_uniform_map: Dict[torch.device, Callable] = {}
try:
# To enable Tutel MoE optimizations:
# python3 -m pip install --user --upgrade git+https://github.com/microsoft/[email protected]
from tutel import moe as tutel_moe
TUTEL_INSTALLED = True
except:
# Fail silently so we don't spam logs unnecessarily if user isn't using tutel
TUTEL_INSTALLED = False
pass
def multiplicative_jitter(x, device: torch.device, epsilon=1e-2):
"""
    Modified from the Switch Transformer paper and Mesh Tensorflow implementations.
Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a torch.tensor
device: torch.device
epsilon: a floating point value
Returns:
a jittered x.
"""
if epsilon == 0:
return x
uniform = uniform_map.get(device)
if uniform is None:
uniform = torch.distributions.uniform.Uniform(low=torch.tensor(1.0 - epsilon, device=device),
high=torch.tensor(1.0 + epsilon,
device=device)).rsample # type: ignore
uniform_map[device] = uniform
return x * uniform(x.shape)
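# Example usage (a sketch): jitter the gate input before computing logits so that small
# bfloat16 rounding errors are less likely to flip expert assignments.
#   x = multiplicative_jitter(x, device=x.device, epsilon=1e-2)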
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
from deepspeed import comm as dist
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
# Based on https://github.com/pytorch/pytorch/pull/40762
class _AllToAll(torch.autograd.Function):
@staticmethod
def forward(
ctx: Any,
# TODO: replace with DS process group
group: torch.distributed.ProcessGroup,
input: Tensor) -> Tensor: # type: ignore
ctx.group = group
input = input.contiguous()
output = torch.empty_like(input)
dist.all_to_all_single(output, input, group=group)
return output
@staticmethod
def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
return (None, _AllToAll.apply(ctx.group, *grad_output))
# einsum rewrites are on par or more performant
# switch can be bubbled up in future
USE_EINSUM = True
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
def einsum(rule, a, b):
if USE_EINSUM:
return torch.einsum(rule, a, b)
elif rule == 's,se->se':
return a.reshape(a.shape[0], -1) * b
elif rule == 'se,sc->sec':
return a.unsqueeze(2) * b.unsqueeze(1)
elif rule == 'se,se->s':
return torch.bmm(a.unsqueeze(1), b.unsqueeze(2)).reshape(-1)
elif rule == 'sec,sm->ecm':
s = a.shape[0]
e = a.shape[1]
c = a.shape[2]
m = b.shape[1]
return torch.matmul(a.reshape(s, -1).t(), b).reshape(e, c, m)
elif rule == 'sec,ecm->sm':
return torch.matmul(a.reshape(a.shape[0], -1), b.reshape(-1, b.shape[-1]))
elif rule == 'ks,ksm->sm':
k = b.shape[0]
s = b.shape[1]
m = b.shape[2]
# [k, s] -> [s, k] -> [s, 1, k]
a = a.t().unsqueeze(1)
# [k,s,m] -> [k, sm] -> [sm, k] -> [s, m, k]
b = b.reshape(k, -1).t().reshape(s, m, k)
# bmm([s, 1, k], [s, m, k]^t) -> [s, m, 1]
return torch.bmm(a, b.transpose(1, 2)).squeeze(2)
else:
return torch.einsum(rule, a, b)
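# Example (a sketch, mirroring the gating code below): combining gate probabilities with one-hot
# capacity locations uses the 'se,sc->sec' rule, i.e.
#   combine_weights = einsum("se,sc->sec", gates, locations_sc)  # [s, e] x [s, c] -> [s, e, c]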
# The following functions are extracted and scripted
# because otherwise during a torch.jit.trace, the non-Tensor
# values used in the calculations get recorded as constants.
# torch.jit.script coerces them into Tensors and preserves
# their dynamic shapes. This enables ONNX export.
# We can't script the entire top1gating function because it
# includes stateful caching logic which is incompatible with ONNX.
@torch.jit.script
def _capacity(gates: Tensor, capacity_factor: Tensor, min_capacity: Tensor) -> Tensor:
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
# to(torch.int64) works around a bug in torch.onnx.export:
# it should cast k to int64 when converting torch.topk but it doesn't.
capacity = torch.ceil((num_tokens / num_experts) * capacity_factor).to(torch.int64)
if capacity < min_capacity:
capacity = min_capacity.to(torch.int64)
return capacity
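# Worked example: with 8 tokens, 4 experts and capacity_factor = 1.0 the raw capacity is
# ceil(8 / 4 * 1.0) = 2; if min_capacity is 4, the returned capacity is 4.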
@torch.jit.script
def _top_idx(source, k):
return torch.topk(source, k=k, dim=0)[1]
@torch.jit.script
def _one_hot_to_float(x, num_classes):
return F.one_hot(x, num_classes=num_classes).float()
def top1gating(logits: Tensor,
capacity_factor: float,
min_capacity: int,
used_token: Tensor = None,
noisy_gate_policy: Optional[str] = None,
drop_tokens: bool = True,
use_rts: bool = True,
use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Implements Top1Gating on logits."""
if noisy_gate_policy == 'RSample':
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# everything is in fp32 in this function
gates = F.softmax(logits, dim=1)
capacity = _capacity(gates, torch.tensor(capacity_factor), torch.tensor(min_capacity))
# Create a mask for 1st's expert per token
# noisy gating
indices1_s = torch.argmax(logits_w_noise if noisy_gate_policy == 'RSample' else gates, dim=1)
num_experts = int(gates.shape[1])
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
# mask only used tokens
if used_token is not None:
mask1 = einsum("s,se->se", used_token, mask1)
# gating decisions
exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')
# if we don't want to drop any tokens
if not drop_tokens:
new_capacity = torch.max(exp_counts).to(logits.device)
dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.get_world_group())
capacity = new_capacity
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.sum(me * ce) * num_experts
# Random Token Selection
if use_rts:
uniform = exp_selection_uniform_map.get(logits.device)
if uniform is None:
uniform = torch.distributions.uniform.Uniform(low=torch.tensor(0.0, device=logits.device),
high=torch.tensor(1.0, device=logits.device)).rsample
exp_selection_uniform_map[logits.device] = uniform
mask1_rand = mask1 * uniform(mask1.shape)
else:
mask1_rand = mask1
    assert logits.shape[0] >= min_capacity, (
        "No. of tokens (batch-size) should be greater than or equal to min_capacity. "
        "Either set min_capacity to 0 or increase your batch size.")
top_idx = _top_idx(mask1_rand, capacity)
new_mask1 = mask1 * torch.zeros_like(mask1).scatter_(0, top_idx, 1)
mask1 = new_mask1
if use_tutel:
# Tutel doesn't support index values masked with zero
# so we need to replace masked indices with -1
indices_mask = mask1.sum(dim=1) * num_experts - 1
indices1_s = torch.min(indices1_s, indices_mask)
# Compute locations in capacity buffer
if use_tutel:
locations1 = tutel_moe.fast_cumsum_sub_one(mask1)
else:
locations1 = torch.cumsum(mask1, dim=0) - 1
if use_tutel:
gates1_s = (gates * mask1).sum(dim=1)
locations1_s = torch.sum(locations1 * mask1, dim=1)
return l_aux, capacity, num_experts, [
indices1_s,
], [
locations1_s,
], [
gates1_s,
], exp_counts
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
# Normalize gate probabilities
mask1_float = mask1.float()
gates = gates * mask1_float
locations1_sc = _one_hot_to_float(locations1_s, capacity)
combine_weights = einsum("se,sc->sec", gates, locations1_sc)
dispatch_mask = combine_weights.bool()
return l_aux, combine_weights, dispatch_mask, exp_counts
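# A minimal usage sketch for top1gating with random logits; the dimensions
# follow the (s)equence x (e)xpert convention used above and are illustrative.
def _example_top1gating():
    logits = torch.randn(16, 4)  # 16 tokens routed over 4 experts
    l_aux, combine_weights, dispatch_mask, exp_counts = top1gating(logits, capacity_factor=1.0, min_capacity=4)
    # combine_weights / dispatch_mask have shape [s, e, capacity]
    return l_aux, combine_weights.shape, dispatch_mask.shape, exp_counts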
def top2gating(logits: Tensor, capacity_factor: float, min_capacity: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Implements Top2Gating on logits."""
# everything is in fp32 in this function
gates = F.softmax(logits, dim=1)
capacity = _capacity(gates, torch.tensor(capacity_factor * 2), torch.tensor(min_capacity))
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1)
num_experts = int(gates.shape[1])
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
# Create a mask for 2nd's expert per token using Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1)
mask2 = F.one_hot(indices2_s, num_classes=num_experts)
# Compute locations in capacity buffer
locations1 = torch.cumsum(mask1, dim=0) - 1
locations2 = torch.cumsum(mask2, dim=0) - 1
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# gating decisions
exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.mean(me * ce) * num_experts * num_experts
# Remove locations outside capacity from mask
mask1 *= torch.lt(locations1, capacity)
mask2 *= torch.lt(locations2, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Normalize gate probabilities
mask1_float = mask1.float()
mask2_float = mask2.float()
gates1_s = einsum("se,se->s", gates, mask1_float)
gates2_s = einsum("se,se->s", gates, mask2_float)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
# Calculate combine_weights and dispatch_mask
gates1 = einsum("s,se->se", gates1_s, mask1_float)
gates2 = einsum("s,se->se", gates2_s, mask2_float)
locations1_sc = _one_hot_to_float(locations1_s, capacity)
locations2_sc = _one_hot_to_float(locations2_s, capacity)
combine1_sec = einsum("se,sc->sec", gates1, locations1_sc)
combine2_sec = einsum("se,sc->sec", gates2, locations2_sc)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
return l_aux, combine_weights, dispatch_mask, exp_counts
class TopKGate(Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = TopKGate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
        num_experts (int):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(self,
model_dim: int,
num_experts: int,
k: int = 1,
capacity_factor: float = 1.0,
eval_capacity_factor: float = 1.0,
min_capacity: int = 8,
noisy_gate_policy: Optional[str] = None,
drop_tokens: bool = True,
use_rts: bool = True) -> None:
super().__init__()
# Only top-1 and top-2 are supported at the moment.
if k != 1 and k != 2:
raise ValueError('Only top-1 and top-2 gatings are supported.')
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False).float()
self.k = k
self.capacity_factor = capacity_factor
self.eval_capacity_factor = eval_capacity_factor
self.min_capacity = min_capacity
self.noisy_gate_policy = noisy_gate_policy
self.timers = SynchronizedWallClockTimer()
self.wall_clock_breakdown = False
self.gate_time = 0.0
self.drop_tokens = drop_tokens
self.use_rts = use_rts
def forward(self,
input: torch.Tensor,
used_token: torch.Tensor = None,
use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore
if self.wall_clock_breakdown:
self.timers('TopKGate').start()
if self.wg.weight.dtype != torch.float32:
self.wg = self.wg.float()
input_fp32 = input.float()
# input jittering
if self.noisy_gate_policy == 'Jitter' and self.training:
input_fp32 = multiplicative_jitter(input_fp32, device=input.device)
logits = self.wg(input_fp32)
if self.k == 1:
gate_output = top1gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor,
self.min_capacity, used_token, self.noisy_gate_policy if self.training else None,
self.drop_tokens, self.use_rts, use_tutel)
else:
gate_output = top2gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor,
self.min_capacity)
if self.wall_clock_breakdown:
self.timers('TopKGate').stop()
self.gate_time = self.timers('TopKGate').elapsed(reset=False)
return gate_output
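# A minimal usage sketch mirroring the class docstring, with illustrative
# dimensions; k=1 exercises the top-1 gating path implemented above.
def _example_topk_gate():
    gate = TopKGate(model_dim=64, num_experts=4, k=1, min_capacity=4)
    tokens = torch.randn(16, 64)
    l_aux, combine_weights, dispatch_mask, exp_counts = gate(tokens)
    return l_aux, combine_weights.shape, dispatch_mask.shape, exp_counts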
class MOELayer(Base):
"""MOELayer module which implements MixtureOfExperts as described in Gshard_.
::
gate = TopKGate(model_dim, num_experts)
moe = MOELayer(gate, expert)
output = moe(input)
l_aux = moe.l_aux
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
gate (torch.nn.Module):
gate network
        experts (torch.nn.Module):
            expert network
"""
def __init__(self,
gate: Module,
experts: Module,
ep_group_name,
ep_size,
num_local_experts: int,
use_tutel: bool = False) -> None:
super().__init__()
self.gate = gate
self.experts = experts
self.ep_group = None
self.ep_size = ep_size
self.ep_group_name = ep_group_name
self.num_local_experts = num_local_experts
self.time_falltoall = 0.0
self.time_salltoall = 0.0
self.time_moe = 0.0
self.timers = SynchronizedWallClockTimer()
self.wall_clock_breakdown = False
self.use_tutel = use_tutel and TUTEL_INSTALLED and gate.k == 1
if self.use_tutel:
logger.info('Using Tutel optimizations.')
elif use_tutel and not TUTEL_INSTALLED:
logger.warning("Tutel optimization requested but not installed. "
"Proceeding without Tutel.")
elif use_tutel and TUTEL_INSTALLED and gate.k != 1:
logger.warning("To enable Tutel optimization, use top-1 instead of top-2 gate. "
"Proceeding without Tutel.")
def _set_ep_group(self, ep_group):
self.ep_group = ep_group
def forward(self, *input: Tensor, **kwargs: Any) -> Tensor:
if self.wall_clock_breakdown:
self.timers('moe').start()
# Implement Algorithm 2 from GShard paper.
d_model = input[0].shape[-1]
# Initial implementation -> Reshape into S tokens by dropping sequence dimension.
# Reshape into G groups so that each group can distribute tokens equally
# group_size = kwargs['group_size'] if 'group_size' in kwargs.keys() else 1
reshaped_input = input[0].reshape(-1, d_model)
if self.use_tutel:
self.l_aux, C, E, indices_, locations_, gates_, self.exp_counts = self.gate(reshaped_input, input[1], True)
S, M = reshaped_input.size(0), reshaped_input.size(1)
if not hasattr(self, '_tutel_dispatcher'):
self._tutel_dispatcher = tutel_moe.fast_dispatcher(E, C, M, dispatch_dtype=reshaped_input.dtype)
self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C)
dispatched_input = self._tutel_dispatcher.encode(reshaped_input)
else:
self.l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1])
dispatched_input = einsum("sec,sm->ecm", dispatch_mask.type_as(input[0]), reshaped_input)
if self.wall_clock_breakdown:
self.timers('falltoall').start()
if groups._get_expert_model_parallel_world_size() == 1:
# If the non-expert is tensor-parallel, it will create
# duplicate tokens on the tensor-parallel ranks.
# Since our experts are not tensor-parallel, these duplicates
# need to be dropped to ensure correctness.
# this also doubles up as a communication optimization as we are
# reducing the all-to-all communication volume.
dispatched_input = drop_tokens(dispatched_input, dim=1)
dispatched_input = _AllToAll.apply(self.ep_group, dispatched_input)
if self.wall_clock_breakdown:
self.timers('falltoall').stop()
self.time_falltoall = self.timers('falltoall').elapsed(reset=False)
# Re-shape after all-to-all: ecm -> gecm
dispatched_input = dispatched_input.reshape(self.ep_size, self.num_local_experts, -1, d_model)
expert_output = self.experts(dispatched_input)
if self.wall_clock_breakdown:
self.timers('salltoall').start()
expert_output = _AllToAll.apply(self.ep_group, expert_output)
if self.wall_clock_breakdown:
self.timers('salltoall').stop()
self.time_salltoall = self.timers('salltoall').elapsed(reset=False)
# Re-shape back: gecm -> ecm
expert_output = expert_output.reshape(self.ep_size * self.num_local_experts, -1, d_model)
if groups._get_expert_model_parallel_world_size() == 1:
# the dropped duplicate tokens need to be gathered on each
# tensor parallel rank again for the tensor-parallel
# non-expert of the next layer.
expert_output = gather_tokens(expert_output, dim=1)
if self.use_tutel:
combined_output = self._tutel_dispatcher.decode(expert_output.view(E * C, M))
else:
combined_output = einsum("sec,ecm->sm", combine_weights.type_as(input[0]), expert_output)
a = combined_output.reshape(input[0].shape)
if self.wall_clock_breakdown:
self.timers('moe').stop()
self.time_moe = self.timers('moe').elapsed(reset=False)
return a
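# A minimal construction sketch for MOELayer, assuming a toy linear expert;
# running forward additionally requires an expert-parallel process group to
# be set via _set_ep_group (normally done by the MoE wrapper in layer.py).
def _example_moe_layer():
    from deepspeed.moe.experts import Experts
    model_dim, num_experts = 64, 4
    gate = TopKGate(model_dim, num_experts, k=1, min_capacity=4)
    experts = Experts(torch.nn.Linear(model_dim, model_dim), num_local_experts=num_experts)
    return MOELayer(gate, experts, "ep_size_1", 1, num_experts)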
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import List, Tuple, Dict
import torch
from .layer import MoE
def has_moe_layers(m):
has_moe = False
num_experts = 0
for _, module in m.named_modules():
if isinstance(module, MoE):
has_moe = True
num_experts = module.num_experts
break
return has_moe, num_experts
def is_moe_param(param: torch.Tensor) -> bool:
if hasattr(param, "allreduce") and not param.allreduce:
return True
return False
def split_params_into_shared_and_expert_params(
        params: List[torch.nn.Parameter]) -> Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
shared_params, expert_params = [], []
for p in params:
if is_moe_param(p):
expert_params.append(p)
else:
shared_params.append(p)
return shared_params, expert_params
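# A minimal sketch of the split, assuming an expert parameter tagged the way
# deepspeed.moe.experts.Experts tags it (allreduce=False).
def _example_split_params():
    shared = torch.nn.Parameter(torch.zeros(2, 2))
    expert = torch.nn.Parameter(torch.zeros(2, 2))
    expert.allreduce = False  # marks the parameter as an MoE (expert) parameter
    return split_params_into_shared_and_expert_params([shared, expert])  # ([shared], [expert])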
def split_params_grads_into_shared_and_expert_params(
        group: List[torch.nn.Parameter]) -> Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
"""Split grad of parameters into grads of non-expert params
and grads of expert params. This is useful while computing
grad-norms for clipping and overflow detection
group (List[torch.nn.Parameter]):
Args:
The group of parameters to split
Returns:
Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
list of gradients for non MoE params, list of gradients of MoE params
"""
expert_grads = []
shared_grads = []
for p in group:
if p.grad is not None:
if is_moe_param(p):
expert_grads.append(p.grad.to(p.dtype))
else:
shared_grads.append(p.grad.to(p.dtype))
return shared_grads, expert_grads
def split_params_into_different_moe_groups_for_optimizer(param_groups: Tuple[Dict],
max_group_size=178956971) -> Tuple[Dict]:
"""Split parameters into different MoE groups for optimizer
Args:
param_groups (Tuple[Dict]):
The list of parameter groups to split
Returns:
Tuple[Dict]:
list of MoE/non-MoE groups for optimizer
"""
if isinstance(param_groups, tuple):
param_groups = list(param_groups) # Tuple cannot be modified
elif isinstance(param_groups, dict):
param_groups = [param_groups]
elif not isinstance(param_groups, list):
raise ValueError(f"Unknown param group type of {type(param_groups)}")
# gather all data parallel group names
data_parallel_group_names = set()
for param_group in param_groups:
for param in param_group["params"]:
if is_moe_param(param):
data_parallel_group_names.add(param.group_name)
data_parallel_group_names = list(data_parallel_group_names)
group_moe = {}
# Create the param MoE groups, leave param assign to next step
for param_group in param_groups:
group_moe[param_group['name']] = {}
for key in data_parallel_group_names:
group_moe[param_group['name']][key] = {}
group_moe[param_group['name']][key]['name'] = key
group_moe[param_group['name']][key]['moe'] = True
for ori_key in param_group.keys():
if ori_key != 'name':
if ori_key == 'params':
group_moe[param_group['name']][key][ori_key] = []
else:
group_moe[param_group['name']][key][ori_key] = param_group[ori_key]
# Assign param
for param_group in param_groups:
new_params = []
for param in param_group['params']:
if is_moe_param(param):
group_moe[param_group['name']][param.group_name]['params'].append(param)
# param_group['params'].remove(param)
else:
new_params.append(param)
param_group['params'] = new_params
# Flatten the moe groups
if max_group_size is not None:
for k, v in group_moe.items():
for k1, v1 in v.items():
cur_group = []
all_groups = []
size_of_cur_group = 0
for param in v1['params']:
if size_of_cur_group + param.numel() <= max_group_size:
cur_group.append(param)
size_of_cur_group += param.numel()
else:
all_groups.append(cur_group)
cur_group = [param]
size_of_cur_group = param.numel()
if cur_group:
all_groups.append(cur_group)
for group in all_groups:
new_dict = {}
for key, val in v1.items():
if key != 'params':
new_dict[key] = val
new_dict['params'] = group
param_groups.append(new_dict)
else:
for k, v in group_moe.items():
for k1, v1 in v.items():
param_groups.append(v1)
return tuple(param_groups)
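# A minimal usage sketch, assuming a single expert parameter that carries the
# group_name attribute set by the MoE layer; values are illustrative.
def _example_split_moe_groups():
    shared = torch.nn.Parameter(torch.zeros(4))
    expert = torch.nn.Parameter(torch.zeros(4))
    expert.allreduce = False
    expert.group_name = "ep_size_1"
    param_groups = [{'name': 'default', 'params': [shared, expert], 'lr': 1e-3}]
    # Returns the original (now expert-free) group plus one MoE group per group_name.
    return split_params_into_different_moe_groups_for_optimizer(param_groups)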
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.utils import log_dist
from deepspeed.utils import groups
from .sharded_moe import MOELayer, TopKGate
from .experts import Experts
import typing
class MoE(torch.nn.Module):
"""Initialize an MoE layer.
Arguments:
hidden_size (int): the hidden dimension of the model, importantly this is also the input and output dimension.
        expert (torch.nn.Module): the torch module that defines the expert (e.g., MLP, torch.nn.Linear).
num_experts (int, optional): default=1, the total number of experts per layer.
ep_size (int, optional): default=1, number of ranks in the expert parallel world or group.
k (int, optional): default=1, top-k gating value, only supports k=1 or k=2.
        capacity_factor (float, optional): default=1.0, the capacity factor of each expert at training time.
        eval_capacity_factor (float, optional): default=1.0, the capacity factor of each expert at eval time.
min_capacity (int, optional): default=4, the minimum capacity per expert regardless of the capacity_factor.
use_residual (bool, optional): default=False, make this MoE layer a Residual MoE (https://arxiv.org/abs/2201.05596) layer.
noisy_gate_policy (str, optional): default=None, noisy gate policy, valid options are 'Jitter', 'RSample' or 'None'.
drop_tokens (bool, optional): default=True, whether to drop tokens - (setting to False is equivalent to infinite capacity).
use_rts (bool, optional): default=True, whether to use Random Token Selection.
use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed).
        enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts.
"""
def __init__(self,
hidden_size,
expert,
num_experts=1,
ep_size=1,
k=1,
capacity_factor=1.,
eval_capacity_factor=1.,
min_capacity=4,
use_residual=False,
noisy_gate_policy: typing.Optional[str] = None,
drop_tokens: bool = True,
use_rts=True,
use_tutel: bool = False,
enable_expert_tensor_parallelism: bool = False):
super(MoE, self).__init__()
self.use_residual = use_residual
self.enable_expert_tensor_parallelism = enable_expert_tensor_parallelism
assert num_experts % ep_size == 0, f"Number of experts ({num_experts}) should be divisible by expert parallel size ({ep_size})"
self.ep_size = ep_size
self.expert_group_name = f"ep_size_{self.ep_size}"
self.num_experts = num_experts
self.num_local_experts = num_experts // self.ep_size
log_dist(
f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}',
[0])
assert noisy_gate_policy is None or noisy_gate_policy in ['None', 'Jitter', 'RSample'], \
'Unsupported noisy_gate_policy: ' + noisy_gate_policy
experts = Experts(expert, self.num_local_experts, self.expert_group_name)
self.deepspeed_moe = MOELayer(TopKGate(hidden_size, num_experts, k, capacity_factor, eval_capacity_factor,
min_capacity, noisy_gate_policy, drop_tokens, use_rts),
experts,
self.expert_group_name,
self.ep_size,
self.num_local_experts,
use_tutel=use_tutel)
if self.use_residual:
self.mlp = expert
# coefficient is used for weighted sum of the output of expert and mlp
self.coefficient = torch.nn.Linear(hidden_size, 2)
def set_deepspeed_parallelism(self):
self._create_process_groups()
def _create_process_groups(self):
# Create process group for a layer if needed
if self.expert_group_name not in groups._get_expert_parallel_group_dict():
print(f"No existing process group found, creating a new group named: {self.expert_group_name}")
if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism):
# Condition 1 - no groups.mpu means no tensor parallelism
# Condition 2 - disabling expert tensor parallelism on purpose
groups._create_expert_and_data_parallel(self.ep_size)
else:
# expert tensor parallelism is enabled
groups._create_expert_data_and_model_parallel(self.ep_size, mpu=groups.mpu)
# Set the group handle for the MOELayer (deepspeed_moe) object
self.deepspeed_moe._set_ep_group(groups._get_expert_parallel_group(self.expert_group_name))
def forward(self, hidden_states, used_token=None):
""" MoE forward
Arguments:
hidden_states (Tensor): input to the layer
used_token (Tensor, optional): default: None, mask only used tokens
Returns:
A tuple including output, gate loss, and expert count.
* output (Tensor): output of the model
* l_aux (Tensor): gate loss value
            * exp_counts (Tensor): the number of tokens dispatched to each expert
"""
output = self.deepspeed_moe(hidden_states, used_token)
if self.use_residual:
# Residual MoE
output_mlp = self.mlp(hidden_states)
if type(output_mlp) is tuple:
output_mlp = output_mlp[0] # Ignore the bias term for now
coef = self.coefficient(hidden_states)
coef = torch.nn.functional.softmax(coef, dim=-1)
output = output * coef[..., 0:1] + output_mlp * coef[..., 1:]
return output, self.deepspeed_moe.l_aux, self.deepspeed_moe.exp_counts
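# A minimal construction sketch for the MoE wrapper, assuming a toy MLP
# expert; set_deepspeed_parallelism() must still be called once distributed
# groups are available (typically after deepspeed.initialize).
def _example_moe():
    hidden_size = 64
    expert = torch.nn.Sequential(torch.nn.Linear(hidden_size, 4 * hidden_size), torch.nn.ReLU(),
                                 torch.nn.Linear(4 * hidden_size, hidden_size))
    return MoE(hidden_size=hidden_size, expert=expert, num_experts=4, ep_size=1, k=1)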
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import copy
class Experts(torch.nn.Module):
def __init__(self, expert, num_local_experts=1, expert_group_name=None):
super(Experts, self).__init__()
self.deepspeed_experts = torch.nn.ModuleList([copy.deepcopy(expert) for i in range(num_local_experts)])
self.num_local_experts = num_local_experts
# TODO: revisit allreduce for moe.gate...
for expert in self.deepspeed_experts:
# TODO: Create param groups to handle expert + data case (e.g. param.group = moe_group)
for name, param in expert.named_parameters():
param.allreduce = False
param.group_name = expert_group_name
def forward(self, inputs):
chunks = inputs.chunk(self.num_local_experts, dim=1)
expert_outputs = []
for chunk, expert in zip(chunks, self.deepspeed_experts):
out = expert(chunk)
if type(out) is tuple:
out = out[0] # Ignore the bias term for now
expert_outputs += [out]
expert_output = torch.cat(expert_outputs, dim=1)
return expert_output
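# A minimal sketch of how Experts chunks its input along dim=1, one chunk per
# local expert; the dimensions are illustrative.
def _example_experts():
    experts = Experts(torch.nn.Linear(8, 8), num_local_experts=2)
    dispatched = torch.randn(1, 2, 4, 8)  # [ep_size, num_local_experts, capacity, model_dim]
    return experts(dispatched).shape  # torch.Size([1, 2, 4, 8])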
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# The file has been adapted from the following Megatron-LM file:
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/mpu/mappings.py
# Git commit hash: 9dc3c42a84aa656f583703cf8b6b4f79f712b796
# We retain the following copyright from the original files:
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import deepspeed
def _gather_tokens(input_, dim=0):
"""Gather tensors and concatenate them along a dimension"""
mpu = deepspeed.utils.groups.mpu
input_ = input_.contiguous()
# Size and dimension.
rank = mpu.get_tensor_model_parallel_rank()
tensor_list = [torch.empty_like(input_) for _ in range(mpu.get_tensor_model_parallel_world_size())]
tensor_list[rank] = input_
deepspeed.comm.all_gather(tensor_list, input_, group=mpu.get_tensor_model_parallel_group())
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=dim).contiguous()
return output
def _drop_tokens(input_, dim=0):
"""Divide a tensor among the tensor parallel ranks"""
mpu = deepspeed.utils.groups.mpu
total_chunks = mpu.get_tensor_model_parallel_world_size()
this_chunk = mpu.get_tensor_model_parallel_rank()
assert input_.shape[
dim] % total_chunks == 0, f"input dimension {dim} ({input_.shape[dim]}) is not divisible by tensor parallel world size ({total_chunks})"
chunk_size = input_.shape[dim] // total_chunks
return torch.narrow(input_, dim, this_chunk * chunk_size, chunk_size)
class _GatherTokens(torch.autograd.Function):
"""All gather tokens among the tensor parallel ranks"""
@staticmethod
def symbolic(graph, input_, dim):
return _gather_tokens(input_, dim)
@staticmethod
def forward(ctx, input_, dim):
ctx.dim = dim
return _gather_tokens(input_, dim)
@staticmethod
def backward(ctx, grad_output):
return _drop_tokens(grad_output, ctx.dim), None
class _DropTokens(torch.autograd.Function):
"Divide tokens equally among the tensor parallel ranks"
@staticmethod
def symbolic(graph, input_, dim):
return _drop_tokens(input_, dim)
@staticmethod
def forward(ctx, input_, dim):
ctx.dim = dim
return _drop_tokens(input_, dim)
@staticmethod
def backward(ctx, input_):
return _gather_tokens(input_, ctx.dim), None
def gather_tokens(input_, dim=0):
mpu = deepspeed.utils.groups.mpu
if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1:
# no tensor parallelism for non-experts
return input_
return _GatherTokens.apply(input_, dim)
def drop_tokens(input_, dim=0):
mpu = deepspeed.utils.groups.mpu
if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1:
# no tensor parallelism for non-experts
return input_
return _DropTokens.apply(input_, dim)
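# A minimal sketch: when no model-parallel mpu has been configured
# (deepspeed.utils.groups.mpu is None), both helpers are no-ops and return
# their input unchanged.
def _example_tokens_passthrough():
    x = torch.randn(4, 6)
    assert gather_tokens(x, dim=0) is x and drop_tokens(x, dim=0) is x
    return x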
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder
import sys
class FusedAdamBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_FUSED_ADAM"
NAME = "fused_adam"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.adam.{self.NAME}_op'
def sources(self):
return ['csrc/adam/fused_adam_frontend.cpp', 'csrc/adam/multi_tensor_adam.cu']
def include_paths(self):
return ['csrc/includes', 'csrc/adam']
def cxx_args(self):
args = super().cxx_args()
return args + self.version_dependent_macros()
def nvcc_args(self):
nvcc_flags = ['-O3'] + self.version_dependent_macros()
if not self.is_rocm_pytorch():
nvcc_flags.extend(
['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] +
self.compute_capability_args())
return nvcc_flags
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from .builder import TorchCPUOpBuilder
class CPUAdagradBuilder(TorchCPUOpBuilder):
BUILD_VAR = "DS_BUILD_CPU_ADAGRAD"
NAME = "cpu_adagrad"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.adagrad.{self.NAME}_op'
def sources(self):
if self.build_for_cpu:
return ['csrc/adagrad/cpu_adagrad.cpp']
return ['csrc/adagrad/cpu_adagrad.cpp', 'csrc/common/custom_cuda_kernel.cu']
def libraries_args(self):
args = super().libraries_args()
if self.build_for_cpu:
return args
if not self.is_rocm_pytorch():
args += ['curand']
return args
def include_paths(self):
import torch
if self.build_for_cpu:
CUDA_INCLUDE = []
elif not self.is_rocm_pytorch():
CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")]
else:
CUDA_INCLUDE = [
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"),
]
return ['csrc/includes'] + CUDA_INCLUDE
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder, installed_cuda_version
class InferenceBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE"
NAME = "transformer_inference"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.transformer.inference.{self.NAME}_op'
def is_compatible(self, verbose=True):
try:
import torch
except ImportError:
self.warning("Please install torch if trying to pre-compile inference kernels")
return False
cuda_okay = True
if not self.is_rocm_pytorch() and torch.cuda.is_available():
sys_cuda_major, _ = installed_cuda_version()
torch_cuda_major = int(torch.version.cuda.split('.')[0])
cuda_capability = torch.cuda.get_device_properties(0).major
if cuda_capability < 6:
self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
cuda_okay = False
if cuda_capability >= 8:
if torch_cuda_major < 11 or sys_cuda_major < 11:
self.warning("On Ampere and higher architectures please use CUDA 11+")
cuda_okay = False
return super().is_compatible(verbose) and cuda_okay
def filter_ccs(self, ccs):
ccs_retained = []
ccs_pruned = []
for cc in ccs:
if int(cc[0]) >= 6:
ccs_retained.append(cc)
else:
ccs_pruned.append(cc)
if len(ccs_pruned) > 0:
self.warning(f"Filtered compute capabilities {ccs_pruned}")
return ccs_retained
def sources(self):
return [
'csrc/transformer/inference/csrc/pt_binding.cpp',
'csrc/transformer/inference/csrc/gelu.cu',
'csrc/transformer/inference/csrc/relu.cu',
'csrc/transformer/inference/csrc/layer_norm.cu',
'csrc/transformer/inference/csrc/rms_norm.cu',
'csrc/transformer/inference/csrc/softmax.cu',
'csrc/transformer/inference/csrc/dequantize.cu',
'csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu',
'csrc/transformer/inference/csrc/transform.cu',
'csrc/transformer/inference/csrc/pointwise_ops.cu',
]
def extra_ldflags(self):
if not self.is_rocm_pytorch():
return ['-lcurand']
else:
return []
def include_paths(self):
return ['csrc/transformer/inference/includes', 'csrc/includes']
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
import os
import pkgutil
import importlib
from .builder import get_default_compute_capabilities, OpBuilder
# Do not remove, required for abstract accelerator to detect if we have a deepspeed or 3p op_builder
__deepspeed__ = True
# List of all available op builders from deepspeed op_builder
try:
import deepspeed.ops.op_builder # noqa: F401
op_builder_dir = "deepspeed.ops.op_builder"
except ImportError:
op_builder_dir = "op_builder"
__op_builders__ = []
this_module = sys.modules[__name__]
def builder_closure(member_name):
if op_builder_dir == "op_builder":
# during installation time cannot get builder due to torch not installed,
# return closure instead
def _builder():
from deepspeed.accelerator import get_accelerator
builder = get_accelerator().create_op_builder(member_name)
return builder
return _builder
else:
# during runtime, return op builder class directly
from deepspeed.accelerator import get_accelerator
builder = get_accelerator().get_op_builder(member_name)
return builder
# reflect builder names and add builder closure, such as 'TransformerBuilder()' creates op builder wrt current accelerator
for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(this_module.__file__)]):
if module_name != 'all_ops' and module_name != 'builder':
module = importlib.import_module(f".{module_name}", package=op_builder_dir)
for member_name in module.__dir__():
if member_name.endswith('Builder') and member_name != "OpBuilder" and member_name != "CUDAOpBuilder":
# assign builder name to variable with same name
# the following is equivalent to i.e. TransformerBuilder = "TransformerBuilder"
this_module.__dict__[member_name] = builder_closure(member_name)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder
class QuantizerBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_QUANTIZER"
NAME = "quantizer"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.quantizer.{self.NAME}_op'
def sources(self):
return [
'csrc/quantization/pt_binding.cpp',
'csrc/quantization/fake_quantizer.cu',
'csrc/quantization/quantize.cu',
'csrc/quantization/dequantize.cu',
]
def include_paths(self):
return ['csrc/includes']
def extra_ldflags(self):
return ['-lcurand']
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import OpBuilder
try:
from packaging import version as pkg_version
except ImportError:
pkg_version = None
class SparseAttnBuilder(OpBuilder):
BUILD_VAR = "DS_BUILD_SPARSE_ATTN"
NAME = "sparse_attn"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.sparse_attention.{self.NAME}_op'
def sources(self):
return ['csrc/sparse_attention/utils.cpp']
def cxx_args(self):
return ['-O2', '-fopenmp']
def is_compatible(self, verbose=True):
# Check to see if llvm and cmake are installed since they are dependencies
#required_commands = ['llvm-config|llvm-config-9', 'cmake']
#command_status = list(map(self.command_exists, required_commands))
#deps_compatible = all(command_status)
if self.is_rocm_pytorch():
self.warning(f'{self.NAME} is not compatible with ROCM')
return False
try:
import torch
except ImportError:
self.warning(f"unable to import torch, please install it first")
return False
# torch-cpu will not have a cuda version
if torch.version.cuda is None:
cuda_compatible = False
self.warning(f"{self.NAME} cuda is not available from torch")
else:
major, minor = torch.version.cuda.split('.')[:2]
cuda_compatible = (int(major) == 10 and int(minor) >= 1) or (int(major) >= 11)
if not cuda_compatible:
self.warning(f"{self.NAME} requires CUDA version 10.1+")
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
torch_compatible = (TORCH_MAJOR == 1 and TORCH_MINOR >= 5)
if not torch_compatible:
self.warning(
f'{self.NAME} requires a torch version >= 1.5 and < 2.0 but detected {TORCH_MAJOR}.{TORCH_MINOR}')
try:
import triton
except ImportError:
# auto-install of triton is broken on some systems, reverting to manual install for now
# see this issue: https://github.com/microsoft/DeepSpeed/issues/1710
self.warning(f"please install triton==1.0.0 if you want to use sparse attention")
return False
if pkg_version:
installed_triton = pkg_version.parse(triton.__version__)
triton_mismatch = installed_triton != pkg_version.parse("1.0.0")
else:
installed_triton = triton.__version__
triton_mismatch = installed_triton != "1.0.0"
if triton_mismatch:
self.warning(f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible")
return False
return super().is_compatible(verbose) and torch_compatible and cuda_compatible
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder
class RandomLTDBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_RANDOM_LTD"
NAME = "random_ltd"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.{self.NAME}_op'
def extra_ldflags(self):
if not self.is_rocm_pytorch():
return ['-lcurand']
else:
return []
def sources(self):
return [
'csrc/random_ltd/pt_binding.cpp', 'csrc/random_ltd/gather_scatter.cu',
'csrc/random_ltd/slice_attn_masks.cu', 'csrc/random_ltd/token_sort.cu'
]
def include_paths(self):
includes = ['csrc/includes']
if self.is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
includes += ['{}/hiprand/include'.format(ROCM_HOME), '{}/rocrand/include'.format(ROCM_HOME)]
return includes
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import sys
import time
import importlib
from pathlib import Path
import subprocess
import shlex
import shutil
import tempfile
import distutils.ccompiler
import distutils.log
import distutils.sysconfig
from distutils.errors import CompileError, LinkError
from abc import ABC, abstractmethod
from typing import List
YELLOW = '\033[93m'
END = '\033[0m'
WARNING = f"{YELLOW} [WARNING] {END}"
DEFAULT_TORCH_EXTENSION_PATH = "/tmp/torch_extensions"
DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0"
try:
import torch
except ImportError:
print(f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops.")
else:
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
def installed_cuda_version(name=""):
import torch.utils.cpp_extension
cuda_home = torch.utils.cpp_extension.CUDA_HOME
assert cuda_home is not None, "CUDA_HOME does not exist, unable to compile CUDA op(s)"
# Ensure there is not a cuda version mismatch between torch and nvcc compiler
output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True)
output_split = output.split()
release_idx = output_split.index("release")
release = output_split[release_idx + 1].replace(',', '').split(".")
# Ignore patch versions, only look at major + minor
cuda_major, cuda_minor = release[:2]
return int(cuda_major), int(cuda_minor)
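# A worked example of the parsing above; the banner text is illustrative of
# typical `nvcc -V` output.
def _example_parse_nvcc_release(output: str = "Cuda compilation tools, release 11.8, V11.8.89"):
    output_split = output.split()
    release = output_split[output_split.index("release") + 1].replace(',', '').split(".")
    return int(release[0]), int(release[1])  # (11, 8)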
def get_default_compute_capabilities():
compute_caps = DEFAULT_COMPUTE_CAPABILITIES
import torch.utils.cpp_extension
if torch.utils.cpp_extension.CUDA_HOME is not None and installed_cuda_version()[0] >= 11:
if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0:
# Special treatment of CUDA 11.0 because compute_86 is not supported.
compute_caps += ";8.0"
else:
compute_caps += ";8.0;8.6"
return compute_caps
# list compatible minor CUDA versions - so that for example pytorch built with cuda-11.0 can be used
# to build deepspeed and system-wide installed cuda 11.2
cuda_minor_mismatch_ok = {
10: [
"10.0",
"10.1",
"10.2",
],
11: ["11.0", "11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8"],
12: ["12.0", "12.1"],
}
def assert_no_cuda_mismatch(name=""):
cuda_major, cuda_minor = installed_cuda_version(name)
sys_cuda_version = f'{cuda_major}.{cuda_minor}'
torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
# This is a show-stopping error, should probably not proceed past this
if sys_cuda_version != torch_cuda_version:
if (cuda_major in cuda_minor_mismatch_ok and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major]
and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]):
print(f"Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda} "
"but since the APIs are compatible, accepting this combination")
return True
elif os.getenv("DS_SKIP_CUDA_CHECK", "0") == "1":
print(
f"{WARNING} DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda}."
"Detected `DS_SKIP_CUDA_CHECK=1`: Allowing this combination of CUDA, but it may result in unexpected behavior."
)
return True
raise Exception(f">- DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda}, unable to compile "
"cuda/cpp extensions without a matching cuda version.")
return True
class OpBuilder(ABC):
_rocm_version = None
_is_rocm_pytorch = None
def __init__(self, name):
self.name = name
self.jit_mode = False
self.build_for_cpu = False
self.enable_bf16 = False
self.error_log = None
@abstractmethod
def absolute_name(self):
'''
Returns absolute build path for cases where the op is pre-installed, e.g., deepspeed.ops.adam.cpu_adam
will be installed as something like: deepspeed/ops/adam/cpu_adam.so
'''
pass
@abstractmethod
def sources(self):
'''
Returns list of source files for your op, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
'''
pass
def hipify_extension(self):
pass
@staticmethod
def validate_torch_version(torch_info):
install_torch_version = torch_info['version']
current_torch_version = ".".join(torch.__version__.split('.')[:2])
if install_torch_version != current_torch_version:
raise RuntimeError("PyTorch version mismatch! DeepSpeed ops were compiled and installed "
"with a different version than what is being used at runtime. "
f"Please re-install DeepSpeed or switch torch versions. "
f"Install torch version={install_torch_version}, "
f"Runtime torch version={current_torch_version}")
@staticmethod
def validate_torch_op_version(torch_info):
if not OpBuilder.is_rocm_pytorch():
current_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
install_cuda_version = torch_info['cuda_version']
if install_cuda_version != current_cuda_version:
raise RuntimeError("CUDA version mismatch! DeepSpeed ops were compiled and installed "
"with a different version than what is being used at runtime. "
f"Please re-install DeepSpeed or switch torch versions. "
f"Install CUDA version={install_cuda_version}, "
f"Runtime CUDA version={current_cuda_version}")
else:
current_hip_version = ".".join(torch.version.hip.split('.')[:2])
install_hip_version = torch_info['hip_version']
if install_hip_version != current_hip_version:
raise RuntimeError("HIP version mismatch! DeepSpeed ops were compiled and installed "
"with a different version than what is being used at runtime. "
f"Please re-install DeepSpeed or switch torch versions. "
f"Install HIP version={install_hip_version}, "
f"Runtime HIP version={current_hip_version}")
@staticmethod
def is_rocm_pytorch():
if OpBuilder._is_rocm_pytorch is not None:
return OpBuilder._is_rocm_pytorch
_is_rocm_pytorch = False
try:
import torch
except ImportError:
pass
else:
if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5):
_is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None
if _is_rocm_pytorch:
from torch.utils.cpp_extension import ROCM_HOME
_is_rocm_pytorch = ROCM_HOME is not None
OpBuilder._is_rocm_pytorch = _is_rocm_pytorch
return OpBuilder._is_rocm_pytorch
@staticmethod
def installed_rocm_version():
if OpBuilder._rocm_version:
return OpBuilder._rocm_version
ROCM_MAJOR = '0'
ROCM_MINOR = '0'
if OpBuilder.is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
rocm_ver_file = Path(ROCM_HOME).joinpath(".info/version-dev")
if rocm_ver_file.is_file():
with open(rocm_ver_file, 'r') as file:
ROCM_VERSION_DEV_RAW = file.read()
elif "rocm" in torch.__version__:
ROCM_VERSION_DEV_RAW = torch.__version__.split("rocm")[1]
else:
assert False, "Could not detect ROCm version"
assert ROCM_VERSION_DEV_RAW != "", "Could not detect ROCm version"
ROCM_MAJOR = ROCM_VERSION_DEV_RAW.split('.')[0]
ROCM_MINOR = ROCM_VERSION_DEV_RAW.split('.')[1]
OpBuilder._rocm_version = (int(ROCM_MAJOR), int(ROCM_MINOR))
return OpBuilder._rocm_version
def include_paths(self):
'''
Returns list of include paths, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
'''
return []
def nvcc_args(self):
'''
Returns optional list of compiler flags to forward to nvcc when building CUDA sources
'''
return []
def cxx_args(self):
'''
Returns optional list of compiler flags to forward to the build
'''
return []
def is_compatible(self, verbose=True):
'''
Check if all non-python dependencies are satisfied to build this op
'''
return True
def extra_ldflags(self):
return []
def libraries_installed(self, libraries):
valid = False
check_cmd = 'dpkg -l'
for lib in libraries:
result = subprocess.Popen(f'dpkg -l {lib}', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
valid = valid or result.wait() == 0
return valid
def has_function(self, funcname, libraries, verbose=False):
'''
Test for existence of a function within a tuple of libraries.
This is used as a smoke test to check whether a certain library is available.
As a test, this creates a simple C program that calls the specified function,
and then distutils is used to compile that program and link it with the specified libraries.
Returns True if both the compile and link are successful, False otherwise.
'''
tempdir = None # we create a temporary directory to hold various files
filestderr = None # handle to open file to which we redirect stderr
oldstderr = None # file descriptor for stderr
try:
# Echo compile and link commands that are used.
if verbose:
distutils.log.set_verbosity(1)
# Create a compiler object.
compiler = distutils.ccompiler.new_compiler(verbose=verbose)
# Configure compiler and linker to build according to Python install.
distutils.sysconfig.customize_compiler(compiler)
# Create a temporary directory to hold test files.
tempdir = tempfile.mkdtemp()
# Define a simple C program that calls the function in question
prog = "void %s(void); int main(int argc, char** argv) { %s(); return 0; }" % (funcname, funcname)
# Write the test program to a file.
filename = os.path.join(tempdir, 'test.c')
with open(filename, 'w') as f:
f.write(prog)
# Redirect stderr file descriptor to a file to silence compile/link warnings.
if not verbose:
filestderr = open(os.path.join(tempdir, 'stderr.txt'), 'w')
oldstderr = os.dup(sys.stderr.fileno())
os.dup2(filestderr.fileno(), sys.stderr.fileno())
# Workaround for behavior in distutils.ccompiler.CCompiler.object_filenames()
# Otherwise, a local directory will be used instead of tempdir
drive, driveless_filename = os.path.splitdrive(filename)
root_dir = driveless_filename[0] if os.path.isabs(driveless_filename) else ''
output_dir = os.path.join(drive, root_dir)
# Attempt to compile the C program into an object file.
cflags = shlex.split(os.environ.get('CFLAGS', ""))
objs = compiler.compile([filename], output_dir=output_dir, extra_preargs=self.strip_empty_entries(cflags))
# Attempt to link the object file into an executable.
# Be sure to tack on any libraries that have been specified.
ldflags = shlex.split(os.environ.get('LDFLAGS', ""))
compiler.link_executable(objs,
os.path.join(tempdir, 'a.out'),
extra_preargs=self.strip_empty_entries(ldflags),
libraries=libraries)
# Compile and link succeeded
return True
except CompileError:
return False
except LinkError:
return False
except:
return False
finally:
# Restore stderr file descriptor and close the stderr redirect file.
if oldstderr is not None:
os.dup2(oldstderr, sys.stderr.fileno())
if filestderr is not None:
filestderr.close()
# Delete the temporary directory holding the test program and stderr files.
if tempdir is not None:
shutil.rmtree(tempdir)
def strip_empty_entries(self, args):
'''
Drop any empty strings from the list of compile and link flags
'''
return [x for x in args if len(x) > 0]
def cpu_arch(self):
try:
from cpuinfo import get_cpu_info
except ImportError as e:
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return "-march=native"
try:
cpu_info = get_cpu_info()
except Exception as e:
self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
"falling back to `lscpu` to get this information.")
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return "-march=native"
if cpu_info['arch'].startswith('PPC_'):
# gcc does not provide -march on PowerPC, use -mcpu instead
return '-mcpu=native'
return '-march=native'
def is_cuda_enable(self):
try:
assert_no_cuda_mismatch(self.name)
return '-D__ENABLE_CUDA__'
except BaseException:
print(f"{WARNING} {self.name} cuda is missing or is incompatible with installed torch, "
"only cpu ops can be compiled!")
return '-D__DISABLE_CUDA__'
return '-D__DISABLE_CUDA__'
def _backup_cpuinfo(self):
# Construct cpu_info dict from lscpu that is similar to what py-cpuinfo provides
if not self.command_exists('lscpu'):
self.warning(f"{self.name} attempted to query 'lscpu' after failing to use py-cpuinfo "
"to detect the CPU architecture. 'lscpu' does not appear to exist on "
"your system, will fall back to use -march=native and non-vectorized execution.")
return None
result = subprocess.check_output('lscpu', shell=True)
result = result.decode('utf-8').strip().lower()
cpu_info = {}
cpu_info['arch'] = None
cpu_info['flags'] = ""
if 'genuineintel' in result or 'authenticamd' in result:
cpu_info['arch'] = 'X86_64'
if 'avx512' in result:
cpu_info['flags'] += 'avx512,'
elif 'avx512f' in result:
cpu_info['flags'] += 'avx512f,'
if 'avx2' in result:
cpu_info['flags'] += 'avx2'
elif 'ppc64le' in result:
cpu_info['arch'] = "PPC_"
return cpu_info
def simd_width(self):
try:
from cpuinfo import get_cpu_info
except ImportError as e:
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return '-D__SCALAR__'
try:
cpu_info = get_cpu_info()
except Exception as e:
self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
"falling back to `lscpu` to get this information.")
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return '-D__SCALAR__'
if cpu_info['arch'] == 'X86_64':
if 'avx512' in cpu_info['flags'] or 'avx512f' in cpu_info['flags']:
return '-D__AVX512__'
elif 'avx2' in cpu_info['flags']:
return '-D__AVX256__'
return '-D__SCALAR__'
def command_exists(self, cmd):
if '|' in cmd:
cmds = cmd.split("|")
else:
cmds = [cmd]
valid = False
for cmd in cmds:
result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
valid = valid or result.wait() == 0
if not valid and len(cmds) > 1:
print(f"{WARNING} {self.name} requires one of the following commands '{cmds}', but it does not exist!")
elif not valid and len(cmds) == 1:
print(f"{WARNING} {self.name} requires the '{cmd}' command, but it does not exist!")
return valid
def warning(self, msg):
self.error_log = f"{msg}"
print(f"{WARNING} {msg}")
def deepspeed_src_path(self, code_path):
if os.path.isabs(code_path):
return code_path
else:
return os.path.join(Path(__file__).parent.parent.absolute(), code_path)
def builder(self):
from torch.utils.cpp_extension import CppExtension
return CppExtension(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())},
extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
def load(self, verbose=True):
from deepspeed.git_version_info import installed_ops, torch_info
if installed_ops.get(self.name, False):
# Ensure the op we're about to load was compiled with the same
# torch/cuda versions we are currently using at runtime.
self.validate_torch_version(torch_info)
if torch.cuda.is_available() and isinstance(self, CUDAOpBuilder):
self.validate_torch_op_version(torch_info)
return importlib.import_module(self.absolute_name())
else:
return self.jit_load(verbose)
def jit_load(self, verbose=True):
if not self.is_compatible(verbose):
raise RuntimeError(
f"Unable to JIT load the {self.name} op due to it not being compatible due to hardware/software issue. {self.error_log}"
)
try:
import ninja # noqa: F401
except ImportError:
raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.")
if isinstance(self, CUDAOpBuilder) and not self.is_rocm_pytorch():
try:
assert_no_cuda_mismatch(self.name)
self.build_for_cpu = False
except BaseException:
self.build_for_cpu = True
self.jit_mode = True
from torch.utils.cpp_extension import load
start_build = time.time()
sources = [self.deepspeed_src_path(path) for path in self.sources()]
extra_include_paths = [self.deepspeed_src_path(path) for path in self.include_paths()]
# Torch will try and apply whatever CCs are in the arch list at compile time,
# we have already set the intended targets ourselves we know that will be
# needed at runtime. This prevents CC collisions such as multiple __half
# implementations. Stash arch list to reset after build.
torch_arch_list = None
if "TORCH_CUDA_ARCH_LIST" in os.environ:
torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST")
os.environ["TORCH_CUDA_ARCH_LIST"] = ""
nvcc_args = self.strip_empty_entries(self.nvcc_args())
cxx_args = self.strip_empty_entries(self.cxx_args())
if isinstance(self, CUDAOpBuilder):
if not self.build_for_cpu and self.enable_bf16:
cxx_args.append("-DBF16_AVAILABLE")
nvcc_args.append("-DBF16_AVAILABLE")
op_module = load(name=self.name,
sources=self.strip_empty_entries(sources),
extra_include_paths=self.strip_empty_entries(extra_include_paths),
extra_cflags=cxx_args,
extra_cuda_cflags=nvcc_args,
extra_ldflags=self.strip_empty_entries(self.extra_ldflags()),
verbose=verbose)
build_duration = time.time() - start_build
if verbose:
print(f"Time to load {self.name} op: {build_duration} seconds")
# Reset arch list so we are not silently removing it for other possible use cases
if torch_arch_list:
os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list
return op_module
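# A minimal usage sketch: builders are normally loaded through their concrete
# subclasses; load() returns the pre-built module if installed, otherwise it
# JIT-compiles the op (which assumes a working compiler toolchain).
def _example_load_fused_adam(verbose=False):
    from deepspeed.ops.op_builder import FusedAdamBuilder
    return FusedAdamBuilder().load(verbose=verbose)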
class CUDAOpBuilder(OpBuilder):
def compute_capability_args(self, cross_compile_archs=None):
"""
Returns nvcc compute capability compile flags.
1. `TORCH_CUDA_ARCH_LIST` takes priority over `cross_compile_archs`.
        2. If neither is set, default compute capabilities will be used
        3. Under `jit_mode`, the compute capabilities of all visible cards will be used, plus PTX
Format:
- `TORCH_CUDA_ARCH_LIST` may use ; or whitespace separators. Examples:
TORCH_CUDA_ARCH_LIST="6.1;7.5;8.6" pip install ...
TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" pip install ...
- `cross_compile_archs` uses ; separator.
"""
ccs = []
if self.jit_mode:
# Compile for underlying architectures since we know those at runtime
for i in range(torch.cuda.device_count()):
CC_MAJOR, CC_MINOR = torch.cuda.get_device_capability(i)
cc = f"{CC_MAJOR}.{CC_MINOR}"
if cc not in ccs:
ccs.append(cc)
ccs = sorted(ccs)
ccs[-1] += '+PTX'
else:
# Cross-compile mode, compile for various architectures
# env override takes priority
cross_compile_archs_env = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
if cross_compile_archs_env is not None:
if cross_compile_archs is not None:
print(
f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`"
)
cross_compile_archs = cross_compile_archs_env.replace(' ', ';')
else:
if cross_compile_archs is None:
cross_compile_archs = get_default_compute_capabilities()
ccs = cross_compile_archs.split(';')
ccs = self.filter_ccs(ccs)
if len(ccs) == 0:
raise RuntimeError(
f"Unable to load {self.name} op due to no compute capabilities remaining after filtering")
args = []
self.enable_bf16 = True
for cc in ccs:
num = cc[0] + cc[2]
args.append(f'-gencode=arch=compute_{num},code=sm_{num}')
if cc.endswith('+PTX'):
args.append(f'-gencode=arch=compute_{num},code=compute_{num}')
if int(cc[0]) <= 7:
self.enable_bf16 = False
return args
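    # Worked example of the flag construction above: for ccs == ['8.0', '8.6+PTX']
    # the emitted args are
    #   -gencode=arch=compute_80,code=sm_80
    #   -gencode=arch=compute_86,code=sm_86
    #   -gencode=arch=compute_86,code=compute_86
    # and self.enable_bf16 remains True because every major version is >= 8.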
def filter_ccs(self, ccs: List[str]):
"""
Prune any compute capabilities that are not compatible with the builder. Should log
which CCs have been pruned.
"""
return ccs
def version_dependent_macros(self):
# Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
return version_ge_1_1 + version_ge_1_3 + version_ge_1_5
def is_compatible(self, verbose=True):
return super().is_compatible(verbose)
def builder(self):
try:
assert_no_cuda_mismatch(self.name)
self.build_for_cpu = False
except BaseException:
self.build_for_cpu = True
if self.build_for_cpu:
from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
else:
from torch.utils.cpp_extension import CUDAExtension as ExtensionBuilder
compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} if self.build_for_cpu else \
{'cxx': self.strip_empty_entries(self.cxx_args()), \
'nvcc': self.strip_empty_entries(self.nvcc_args())}
if not self.build_for_cpu and self.enable_bf16:
compile_args['cxx'].append("-DBF16_AVAILABLE")
cuda_ext = ExtensionBuilder(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
libraries=self.strip_empty_entries(self.libraries_args()),
extra_compile_args=compile_args)
if self.is_rocm_pytorch():
# hip converts paths to absolute, this converts back to relative
sources = cuda_ext.sources
curr_file = Path(__file__).parent.parent # ds root
for i in range(len(sources)):
src = Path(sources[i])
if src.is_absolute():
sources[i] = str(src.relative_to(curr_file))
else:
sources[i] = str(src)
cuda_ext.sources = sources
return cuda_ext
def hipify_extension(self):
if self.is_rocm_pytorch():
from torch.utils.hipify import hipify_python
hipify_python.hipify(
project_directory=os.getcwd(),
output_directory=os.getcwd(),
header_include_dirs=self.include_paths(),
includes=[os.path.join(os.getcwd(), '*')],
extra_files=[os.path.abspath(s) for s in self.sources()],
show_detailed=True,
is_pytorch_extension=True,
hipify_extra_files_only=True,
)
def cxx_args(self):
if sys.platform == "win32":
return ['-O2']
else:
return ['-O3', '-std=c++14', '-g', '-Wno-reorder']
def nvcc_args(self):
if self.build_for_cpu:
return []
args = ['-O3']
if self.is_rocm_pytorch():
ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version()
args += [
'-std=c++14', '-U__HIP_NO_HALF_OPERATORS__', '-U__HIP_NO_HALF_CONVERSIONS__',
'-U__HIP_NO_HALF2_OPERATORS__',
'-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR,
'-DROCM_VERSION_MINOR=%s' % ROCM_MINOR
]
else:
cuda_major, _ = installed_cuda_version()
args += [
'-allow-unsupported-compiler' if sys.platform == "win32" else '', '--use_fast_math',
'-std=c++17' if sys.platform == "win32" and cuda_major > 10 else '-std=c++14',
'-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__'
]
if os.environ.get('DS_DEBUG_CUDA_BUILD', '0') == '1':
args.append('--ptxas-options=-v')
args += self.compute_capability_args()
return args
def libraries_args(self):
if self.build_for_cpu:
return []
if sys.platform == "win32":
return ['cublas', 'curand']
else:
return []
class TorchCPUOpBuilder(CUDAOpBuilder):
def extra_ldflags(self):
if self.build_for_cpu:
return ['-fopenmp']
if not self.is_rocm_pytorch():
return ['-lcurand']
return []
def cxx_args(self):
import torch
args = []
if not self.build_for_cpu:
if not self.is_rocm_pytorch():
CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib64")
else:
CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.ROCM_HOME, "lib")
args += super().cxx_args()
args += [
f'-L{CUDA_LIB64}',
'-lcudart',
'-lcublas',
'-g',
]
CPU_ARCH = self.cpu_arch()
SIMD_WIDTH = self.simd_width()
CUDA_ENABLE = self.is_cuda_enable()
args += [
CPU_ARCH,
'-fopenmp',
SIMD_WIDTH,
CUDA_ENABLE,
]
return args
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder, installed_cuda_version
class SpatialInferenceBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_SPATIAL_INFERENCE"
NAME = "spatial_inference"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.spatial.{self.NAME}_op'
def is_compatible(self, verbose=True):
try:
import torch
except ImportError:
self.warning("Please install torch if trying to pre-compile inference kernels")
return False
cuda_okay = True
if not self.is_rocm_pytorch() and torch.cuda.is_available():
sys_cuda_major, _ = installed_cuda_version()
torch_cuda_major = int(torch.version.cuda.split('.')[0])
cuda_capability = torch.cuda.get_device_properties(0).major
if cuda_capability >= 8:
if torch_cuda_major < 11 or sys_cuda_major < 11:
self.warning("On Ampere and higher architectures please use CUDA 11+")
cuda_okay = False
return super().is_compatible(verbose) and cuda_okay
def sources(self):
return [
'csrc/spatial/csrc/opt_bias_add.cu',
'csrc/spatial/csrc/pt_binding.cpp',
]
def include_paths(self):
return ['csrc/spatial/includes', 'csrc/includes']
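# Hedged usage note (not part of the original file): BUILD_VAR follows DeepSpeed's
# DS_BUILD_* convention, so this op can typically be pre-compiled at install time with
# something like:
#   DS_BUILD_SPATIAL_INFERENCE=1 pip install deepspeed
# Otherwise it is JIT-compiled on first use via the builder's load() method.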
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from .builder import TorchCPUOpBuilder
class CPUAdamBuilder(TorchCPUOpBuilder):
BUILD_VAR = "DS_BUILD_CPU_ADAM"
NAME = "cpu_adam"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.adam.{self.NAME}_op'
def sources(self):
if self.build_for_cpu:
return ['csrc/adam/cpu_adam.cpp']
return ['csrc/adam/cpu_adam.cpp', 'csrc/common/custom_cuda_kernel.cu']
def libraries_args(self):
args = super().libraries_args()
if self.build_for_cpu:
return args
if not self.is_rocm_pytorch():
args += ['curand']
return args
def include_paths(self):
import torch
if self.build_for_cpu:
CUDA_INCLUDE = []
elif not self.is_rocm_pytorch():
CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")]
else:
CUDA_INCLUDE = [
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"),
]
return ['csrc/includes'] + CUDA_INCLUDE
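# Hedged usage sketch (illustrative, not part of the original file): assuming DeepSpeed
# and a working compiler toolchain are installed, the op is usually JIT-built and
# imported through the builder, e.g.:
#   from deepspeed.ops.op_builder import CPUAdamBuilder
#   cpu_adam_module = CPUAdamBuilder().load()  # compiles on first call, cached afterwards
# The exact import path may vary across DeepSpeed versions.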
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import OpBuilder
class UtilsBuilder(OpBuilder):
BUILD_VAR = "DS_BUILD_UTILS"
NAME = "utils"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.{self.NAME}_op'
def sources(self):
return ['csrc/utils/flatten_unflatten.cpp']
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder
class TransformerBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_TRANSFORMER"
NAME = "transformer"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.transformer.{self.NAME}_op'
def extra_ldflags(self):
if not self.is_rocm_pytorch():
return ['-lcurand']
else:
return []
def sources(self):
return [
'csrc/transformer/ds_transformer_cuda.cpp', 'csrc/transformer/cublas_wrappers.cu',
'csrc/transformer/transform_kernels.cu', 'csrc/transformer/gelu_kernels.cu',
'csrc/transformer/dropout_kernels.cu', 'csrc/transformer/normalize_kernels.cu',
'csrc/transformer/softmax_kernels.cu', 'csrc/transformer/general_kernels.cu'
]
def include_paths(self):
includes = ['csrc/includes']
if self.is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
includes += ['{}/hiprand/include'.format(ROCM_HOME), '{}/rocrand/include'.format(ROCM_HOME)]
return includes
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import pkgutil
import importlib
try:
    # at installation time the local accelerator package is importable; otherwise fall back to deepspeed.accelerator
from accelerator import get_accelerator
except ImportError:
from deepspeed.accelerator import get_accelerator
# List of all available ops
# reflect all builder names into __op_builders__
op_builder_dir = get_accelerator().op_builder_dir()
op_builder_module = importlib.import_module(op_builder_dir)
__op_builders__ = []
for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]):
# avoid self references
if module_name != 'all_ops' and module_name != 'builder':
module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
for member_name in module.__dir__():
if member_name.endswith('Builder'):
# append builder to __op_builders__ list
builder = get_accelerator().create_op_builder(member_name)
__op_builders__.append(builder)
ALL_OPS = {op.name: op for op in __op_builders__ if op is not None}
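# Hedged example (not part of the original file): with the dict above populated, the
# discovered builders can be inspected, e.g. to see which ops are buildable on this system:
#   for op_name, builder in ALL_OPS.items():
#       print(op_name, builder.is_compatible(verbose=False))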
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .transformer import TransformerBuilder
class StochasticTransformerBuilder(TransformerBuilder):
BUILD_VAR = "DS_BUILD_STOCHASTIC_TRANSFORMER"
NAME = "stochastic_transformer"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.transformer.{self.NAME}_op'
def nvcc_args(self):
args = super().nvcc_args()
args.append('-D__STOCHASTIC_MODE__')
return args
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder
import sys
class FusedLambBuilder(CUDAOpBuilder):
BUILD_VAR = 'DS_BUILD_FUSED_LAMB'
NAME = "fused_lamb"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.lamb.{self.NAME}_op'
def sources(self):
return ['csrc/lamb/fused_lamb_cuda.cpp', 'csrc/lamb/fused_lamb_cuda_kernel.cu']
def include_paths(self):
return ['csrc/includes']
def cxx_args(self):
args = super().cxx_args()
return args + self.version_dependent_macros()
def nvcc_args(self):
nvcc_flags = ['-O3'] + self.version_dependent_macros()
if self.is_rocm_pytorch():
ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version()
nvcc_flags += ['-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR]
else:
nvcc_flags.extend(
['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] +
self.compute_capability_args())
return nvcc_flags
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import distutils.spawn
import subprocess
from .builder import OpBuilder
class AsyncIOBuilder(OpBuilder):
BUILD_VAR = "DS_BUILD_AIO"
NAME = "async_io"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.aio.{self.NAME}_op'
def sources(self):
return [
'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp',
'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp',
'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp',
'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp',
'csrc/aio/py_lib/deepspeed_pin_tensor.cpp'
]
def include_paths(self):
return ['csrc/aio/py_lib', 'csrc/aio/common']
def cxx_args(self):
# -O0 for improved debugging, since performance is bound by I/O
CPU_ARCH = self.cpu_arch()
SIMD_WIDTH = self.simd_width()
return [
'-g',
'-Wall',
'-O0',
'-std=c++14',
'-shared',
'-fPIC',
'-Wno-reorder',
CPU_ARCH,
'-fopenmp',
SIMD_WIDTH,
'-laio',
]
def extra_ldflags(self):
return ['-laio']
def check_for_libaio_pkg(self):
libs = dict(
dpkg=["-l", "libaio-dev", "apt"],
pacman=["-Q", "libaio", "pacman"],
rpm=["-q", "libaio-devel", "yum"],
)
found = False
for pkgmgr, data in libs.items():
flag, lib, tool = data
path = distutils.spawn.find_executable(pkgmgr)
if path is not None:
cmd = f"{pkgmgr} {flag} {lib}"
result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if result.wait() == 0:
found = True
else:
self.warning(f"{self.NAME}: please install the {lib} package with {tool}")
break
return found
def is_compatible(self, verbose=True):
# Check for the existence of libaio by using distutils
# to compile and link a test program that calls io_submit,
# which is a function provided by libaio that is used in the async_io op.
# If needed, one can define -I and -L entries in CFLAGS and LDFLAGS
# respectively to specify the directories for libaio.h and libaio.so.
aio_compatible = self.has_function('io_submit', ('aio', ))
if verbose and not aio_compatible:
self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.")
# Check for the libaio package via known package managers
# to print suggestions on which package to install.
self.check_for_libaio_pkg()
self.warning(
"If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found."
)
return super().is_compatible(verbose) and aio_compatible
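# Hedged example (not part of the original file): if libaio was installed to a custom
# prefix, the build can usually be pointed at it via the environment variables mentioned
# above, e.g.:
#   CFLAGS="-I/opt/libaio/include" LDFLAGS="-L/opt/libaio/lib" DS_BUILD_AIO=1 pip install deepspeed
# The /opt/libaio prefix is purely illustrative.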
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CPUOpBuilder
class NotImplementedBuilder(CPUOpBuilder):
BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED"
NAME = "deepspeed_not_implemented"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.comm.{self.NAME}_op'
def load(self, verbose=True):
raise ValueError("This op had not been implemented on CPU backend.")
def sources(self):
return []
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from .builder import CPUOpBuilder
class CCLCommBuilder(CPUOpBuilder):
BUILD_VAR = "DS_BUILD_CCL_COMM"
NAME = "deepspeed_ccl_comm"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.comm.{self.NAME}_op'
def sources(self):
return ['csrc/cpu/comm/ccl.cpp']
def include_paths(self):
includes = ['csrc/cpu/includes']
return includes
def is_compatible(self, verbose=True):
        # TODO: add a soft compatibility check for the private binary release.
        # "soft" meaning we know the check could be trivially changed.
return super().is_compatible(verbose)
def extra_ldflags(self):
ccl_root_path = os.environ.get("CCL_ROOT")
        if ccl_root_path is None:
            raise ValueError(
                "CCL_ROOT was not found; install oneCCL from https://github.com/oneapi-src/oneCCL and source its environment setup script so that CCL_ROOT is set"
            )
        return ['-lccl', f'-L{ccl_root_path}/lib']
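# Hedged usage note (not part of the original file): CCL_ROOT is normally set by
# sourcing oneCCL's environment setup script before building, e.g. something like:
#   source /opt/intel/oneccl/env/setvars.sh   # path is illustrative
#   DS_BUILD_CCL_COMM=1 pip install deepspeed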
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
from .comm import CCLCommBuilder
from .no_impl import NotImplementedBuilder
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
try:
    # Is op_builder from deepspeed or a third-party version? This import should only succeed if it is deepspeed's,
    # in which case we are doing a local install rather than taking the JIT compile path.
from op_builder import __deepspeed__ # noqa: F401
from op_builder.builder import OpBuilder
except ImportError:
from deepspeed.ops.op_builder.builder import OpBuilder
class CPUOpBuilder(OpBuilder):
def builder(self):
from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())}
cpp_ext = ExtensionBuilder(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
libraries=self.strip_empty_entries(self.libraries_args()),
extra_compile_args=compile_args)
return cpp_ext
def cxx_args(self):
return ['-O3', '-std=c++14', '-g', '-Wno-reorder']
def libraries_args(self):
return []
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# tests directory-specific settings - this file is run automatically by pytest before any tests are run
import sys
import pytest
import os
from os.path import abspath, dirname, join
import torch
import warnings
# Set this environment variable for the T5 inference unittest(s) (e.g. google/t5-v1_1-small)
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
def pytest_configure(config):
config.option.color = "yes"
config.option.durations = 0
config.option.durations_min = 1
config.option.verbose = True
def pytest_addoption(parser):
parser.addoption("--torch_ver", default=None, type=str)
parser.addoption("--cuda_ver", default=None, type=str)
def validate_version(expected, found):
version_depth = expected.count('.') + 1
found = '.'.join(found.split('.')[:version_depth])
return found == expected
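# For example (illustrative): validate_version("1.13", "1.13.1+cu117") is True because
# only the first two components of the found version are compared, while
# validate_version("1.13.0", "1.13.1") is False.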
@pytest.fixture(scope="session", autouse=True)
def check_environment(pytestconfig):
expected_torch_version = pytestconfig.getoption("torch_ver")
expected_cuda_version = pytestconfig.getoption("cuda_ver")
if expected_torch_version is None:
        warnings.warn(
            "Running tests without verifying the torch version; please provide an expected torch version with --torch_ver")
elif not validate_version(expected_torch_version, torch.__version__):
pytest.exit(
f"expected torch version {expected_torch_version} did not match found torch version {torch.__version__}",
returncode=2)
if expected_cuda_version is None:
        warnings.warn(
            "Running tests without verifying the cuda version; please provide an expected cuda version with --cuda_ver")
elif not validate_version(expected_cuda_version, torch.version.cuda):
pytest.exit(
f"expected cuda version {expected_cuda_version} did not match found cuda version {torch.version.cuda}",
returncode=2)
# Override of pytest "runtest" for DistributedTest class
# This hook is run before the default pytest_runtest_call
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_call(item):
# We want to use our own launching function for distributed tests
if getattr(item.cls, "is_dist_test", False):
dist_test_class = item.cls()
dist_test_class(item._request)
item.runtest = lambda: True # Dummy function so test is not run twice
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(fixturedef, request):
if getattr(fixturedef.func, "is_dist_fixture", False):
#for val in dir(request):
# print(val.upper(), getattr(request, val), "\n")
dist_fixture_class = fixturedef.func()
dist_fixture_class(request)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.strategies import DeepSpeedStrategy
from torch.utils.data import DataLoader, Dataset
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class BoringModel(LightningModule):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def training_step(self, batch, batch_idx):
loss = self(batch).sum()
self.log("train_loss", loss)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
loss = self(batch).sum()
self.log("valid_loss", loss)
def test_step(self, batch, batch_idx):
loss = self(batch).sum()
self.log("test_loss", loss)
def configure_optimizers(self):
return torch.optim.SGD(self.layer.parameters(), lr=0.1)
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64), batch_size=2)
def val_dataloader(self):
return DataLoader(RandomDataset(32, 64), batch_size=2)
def test_lightning_model():
"""Test that DeepSpeed works with a simple LightningModule and LightningDataModule."""
model = BoringModel()
trainer = Trainer(strategy=DeepSpeedStrategy(), max_epochs=1, precision=16, accelerator="gpu", devices=1)
trainer.fit(model)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import os
import sys
import math
from .common import get_test_path
from deepspeed.pipe import PipelineModule, LayerSpec
from deepspeed.accelerator import get_accelerator
def get_megatron_version():
p = os.popen("pip list --format=columns | grep megatron-lm")
pip_list = p.read()
assert 'megatron-lm' in pip_list, 'Please install Megatron-LM before getting its version'
    ver_str = pip_list.split()[1]
    # return only the major version number, e.g. 2.0 for "2.4.0"
    return float(ver_str.split('.')[0])
def get_gpt2_model(args_others, mp_size=1):
from megatron.model import GPT2Model
from megatron.initialize import initialize_megatron
args_defaults = {
'vocab_file': get_test_path('gpt2-vocab.json'),
'merge_file': get_test_path('gpt2-merges.txt'),
'tokenizer_type': 'GPT2BPETokenizer',
}
args_defaults.update(args_others)
# setting "make-vocab-size-divisible-by" to avoid word-embedding size change in resizing testing.
sys.argv.extend(['--model-parallel-size', str(mp_size), '--make-vocab-size-divisible-by', str(1)])
initialize_megatron(args_defaults=args_defaults, ignore_unknown_args=True)
model = GPT2Model(num_tokentypes=0, parallel_output=False)
model.to(get_accelerator().device_name())
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from megatron import mpu
i = get_accelerator().current_device_name()
model = torchDDP(model, device_ids=[i], output_device=i, process_group=mpu.get_data_parallel_group())
return model
class MockGPT2ModelPipe(PipelineModule):
def __init__(self, num_layers, mp_size, args_others, topo, **kwargs):
from megatron.initialize import initialize_megatron
args_defaults = {
'vocab_file': get_test_path('gpt2-vocab.json'),
'merge_file': get_test_path('gpt2-merges.txt'),
'tokenizer_type': 'GPT2BPETokenizer',
}
args_defaults.update(args_others)
# setting "make-vocab-size-divisible-by" to avoid word-embedding size change in resizing testing.
sys.argv.extend(['--model-parallel-size', str(mp_size), '--make-vocab-size-divisible-by', str(1)])
initialize_megatron(args_defaults=args_defaults, ignore_unknown_args=True)
from megatron.model.transformer import ParallelTransformerLayer
class ParallelTransformerLayerPipe(ParallelTransformerLayer):
def forward(self, args):
# hardcode attn mask for testing, PP requires the attn_mask to be stashed
attention_mask = torch.tensor([[True]], device=get_accelerator().current_device_name())
return super().forward(args, attention_mask)
layers = []
for x in range(num_layers):
layers.append(
LayerSpec(ParallelTransformerLayerPipe, self.gpt2_attention_mask_func, self.init_method_normal(0.02),
self.scaled_init_method_normal(0.02, num_layers), x))
super().__init__(layers=layers, loss_fn=torch.nn.CrossEntropyLoss(), topology=topo, **kwargs)
def gpt2_attention_mask_func(self, attention_scores, ltor_mask):
attention_scores.masked_fill_(ltor_mask, -10000.0)
return attention_scores
def init_method_normal(self, sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
def scaled_init_method_normal(self, sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
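    # Worked example (illustrative): with sigma=0.02 and num_layers=24,
    # scaled_init_method_normal yields std = 0.02 / sqrt(2 * 24) ≈ 0.0029.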
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import json
import argparse
import torch
from deepspeed.pipe import PipelineModule, LayerSpec
from deepspeed.moe.layer import MoE
from deepspeed.accelerator import get_accelerator
import deepspeed.comm as dist
class SimpleModel(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False, nlayers=1):
super(SimpleModel, self).__init__()
self.linears = torch.nn.ModuleList([torch.nn.Linear(hidden_dim, hidden_dim) for i in range(nlayers)])
if empty_grad:
self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
self.empty_grad = empty_grad
def forward(self, x, y):
if len(self.linears) == 1:
x = self.linears[0](x)
else:
for i, l in enumerate(self.linears):
x = self.linears[i // 2](x) + l(x)
return self.cross_entropy_loss(x, y)
class SimpleFrozenModel(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False):
super(SimpleFrozenModel, self).__init__()
self.linears = torch.nn.ModuleList([torch.nn.Linear(hidden_dim, hidden_dim) for i in range(2)])
if empty_grad:
self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
self.empty_grad = empty_grad
# Freeze first layer
self.linears[0].weight.requires_grad = False
self.linears[0].bias.requires_grad = False
def forward(self, x, y):
if len(self.linears) == 1:
x = self.linears[0](x)
else:
for i, l in enumerate(self.linears):
x = self.linears[i // 2](x) + l(x)
return self.cross_entropy_loss(x, y)
class Curriculum_SimpleModel(SimpleModel):
def __init__(self, hidden_dim, empty_grad=False):
super(Curriculum_SimpleModel, self).__init__(hidden_dim, empty_grad)
def forward(self, x, y, **kwargs):
seqlen = kwargs.get('curriculum_seqlen', None)
loss = super(Curriculum_SimpleModel, self).forward(x, y)
return loss, seqlen
class SimpleMoEModel(torch.nn.Module):
def __init__(self, hidden_dim, num_experts=4, ep_size=1, use_residual=False):
super(SimpleMoEModel, self).__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
expert = torch.nn.Linear(hidden_dim, hidden_dim)
# using two MoE layers to check implications of sharing a single storage
self.linear2 = MoE(hidden_size=hidden_dim,
expert=expert,
ep_size=ep_size,
use_residual=use_residual,
num_experts=num_experts,
k=1)
self.linear3 = MoE(hidden_size=hidden_dim,
expert=expert,
ep_size=ep_size,
use_residual=use_residual,
num_experts=num_experts,
k=1)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
hidden_dim = self.linear(x)
output, _, _ = self.linear2(hidden_dim)
output, _, _ = self.linear3(output)
hidden_dim = hidden_dim + output
sentence_embed = hidden_dim.mean(1)
return self.cross_entropy_loss(sentence_embed, y)
class SimplePRMoEModel(torch.nn.Module):
def __init__(self, hidden_dim, num_experts=2, ep_size=1, use_residual=False):
super(SimplePRMoEModel, self).__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.linear2 = MoE(hidden_size=hidden_dim,
expert=linear2,
ep_size=ep_size,
use_residual=use_residual,
num_experts=num_experts,
k=1)
linear3 = torch.nn.Linear(hidden_dim, hidden_dim)
self.linear3 = MoE(hidden_size=hidden_dim,
expert=linear3,
ep_size=ep_size,
use_residual=use_residual,
num_experts=int(2 * num_experts),
k=1)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
hidden_dim = x
hidden_dim = self.linear(hidden_dim)
output, _, _ = self.linear2(hidden_dim)
output, _, _ = self.linear3(output)
hidden_dim = hidden_dim + output
sentence_embed = hidden_dim.mean(1)
return self.cross_entropy_loss(sentence_embed, y)
class UnusedParametersModel(SimpleModel):
def __init__(self, hidden_dim, empty_grad=False):
super().__init__(hidden_dim, empty_grad)
self.unused_linear = torch.nn.Linear(hidden_dim, hidden_dim)
class LinearStack(torch.nn.Module):
def __init__(self, input_dim=128, hidden_dim=128, output_dim=128, num_layers=4):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.input_layer = torch.nn.Linear(in_features=self.input_dim, out_features=self.hidden_dim)
self.layers = torch.nn.ModuleList([
torch.nn.Linear(in_features=self.hidden_dim, out_features=self.hidden_dim, bias=False)
for x in range(num_layers)
])
self.output_layer = torch.nn.Linear(in_features=self.hidden_dim, out_features=self.output_dim)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
x = self.input_layer(x)
for layer in self.layers:
x = layer(x)
x = self.output_layer(x)
return x
class LinearStackPipe(PipelineModule):
def __init__(self, input_dim=128, hidden_dim=128, output_dim=128, num_layers=4, **kwargs):
self.input_dim = input_dim
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
layers = []
layers.append(LayerSpec(torch.nn.Linear, self.input_dim, self.hidden_dim))
for x in range(self.num_layers):
layers.append(LayerSpec(torch.nn.Linear, self.hidden_dim, self.hidden_dim, bias=False))
layers.append(lambda x: x)
layers.append(LayerSpec(torch.nn.Linear, self.hidden_dim, self.output_dim))
super().__init__(layers=layers, loss_fn=torch.nn.CrossEntropyLoss(), **kwargs)
class SimpleOptimizer(torch.optim.Optimizer):
def __init__(self, params, lr=0.11072018):
defaults = dict(lr=lr)
super(SimpleOptimizer, self).__init__(params, defaults)
def __setstate__(self, state):
super(SimpleOptimizer, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
                p.data.add_(d_p, alpha=-group['lr'])
return loss
class HybridStateOptimizer(torch.optim.Optimizer):
def __init__(self, params, lr=0.11072018):
defaults = dict(lr=lr)
super(HybridStateOptimizer, self).__init__(params, defaults)
def __setstate__(self, state):
super(HybridStateOptimizer, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
state['integer_step'] = 0
state['tensor_step'] = torch.zeros(1, device=p.device)
d_p = p.grad.data
                p.data.add_(d_p, alpha=-group['lr'])
state['integer_step'] += 1
state['tensor_step'] += 1
return loss
class PLD_SimpleModel(SimpleModel):
def __init__(self, hidden_dim, empty_grad=False):
super(PLD_SimpleModel, self).__init__(hidden_dim, empty_grad)
def forward(self, x, y, **kwargs):
pld = kwargs.get('progressive_layer_drop', False)
theta = kwargs.get('pld_theta', 1.0)
        loss = super(PLD_SimpleModel, self).forward(x, y)
        return loss
def random_dataset(total_samples, hidden_dim, device, dtype=torch.half):
train_data = torch.randn(total_samples, hidden_dim, device=device, dtype=dtype)
train_label = torch.empty(total_samples, dtype=torch.long, device=device).random_(hidden_dim)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
return train_dataset
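# Illustrative example (not part of the original helpers): random_dataset(16, 8, 'cpu',
# dtype=torch.float) returns a TensorDataset of 16 (input, label) pairs, where each
# input has hidden_dim=8 features and each label is an integer in [0, 8).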
def random_dataloader(model, total_samples, hidden_dim, device, dtype=torch.half):
batch_size = model.train_micro_batch_size_per_gpu()
train_dataset = random_dataset(total_samples, hidden_dim, device, dtype=dtype)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)
return train_loader
def sequence_dataloader(model, total_samples, hidden_dim, device, seq_len: int = 32, dtype=torch.half):
batch_size = model.train_micro_batch_size_per_gpu()
train_data = torch.randn(total_samples, seq_len, hidden_dim, device=device, dtype=dtype)
train_label = torch.empty(total_samples, dtype=torch.long, device=device).random_(hidden_dim)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)
return train_loader
def create_config_from_dict(tmpdir, config_dict):
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as fd:
json.dump(config_dict, fd)
return config_path
def create_deepspeed_args():
parser = argparse.ArgumentParser()
args = parser.parse_args(args='')
args.deepspeed = True
if dist.is_initialized():
# We assume up to one full node executing unit tests
assert dist.get_world_size() <= get_accelerator().device_count()
args.local_rank = dist.get_rank()
return args
def args_from_dict(tmpdir, config_dict):
args = create_deepspeed_args()
config_path = create_config_from_dict(tmpdir, config_dict)
args.deepspeed_config = config_path
return args
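# Hedged usage sketch (not part of the original helpers): a test would typically build a
# minimal DeepSpeed config and turn it into args, e.g.:
#   config_dict = {"train_batch_size": 1, "optimizer": {"type": "Adam", "params": {"lr": 1e-3}}}
#   args = args_from_dict(tmpdir, config_dict)
# and then pass `args` (or the config itself) to deepspeed.initialize(); the exact
# config keys above are illustrative.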
|